source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
hybrid_pbfs.h | //
// Created by xehoth on 2021/11/20.
//
#ifndef HYBRID_PBFS_H_
#define HYBRID_PBFS_H_
#include <bfs.h>
#include <frontier.h>
#include <bitmap.h>
#include <prefix_sum.h>
#include <memory>
struct HybridBfs : public Bfs {
using Bfs::Bfs;
void init() override {
Bfs::init();
if (!f) f = std::make_unique<Frontier>(this->m);
if (!nf) nf = std::make_unique<Frontier>(this->m);
if (!b) b = std::make_unique<BitMap64>(this->n);
if (!nb) nb = std::make_unique<BitMap64>(this->n);
if (!pi) pi = MemoryManager::get().alloc<std::uint32_t>(this->n + 1);
if (!cb) cb = MemoryManager::get().alloc<std::uint32_t>(this->m + 1);
f->clear();
nf->clear();
b->clear();
pi[0] = 0;
}
void bfs(std::uint32_t s) override {
if (s >= this->n) return;
dist[s] = 0;
parent[s] = s;
f->push(s);
b->set(s);
// current step is top-down / bottom-up
bool topdownStep = true;
// the number of edges to check from the frontier
std::uint32_t m_f = this->deg(s);
// the number of vertices in the frontier
std::uint32_t n_f = f->n;
// the number of edges to check from unexplored vertices
std::uint32_t mu = this->m;
const std::uint32_t alpha = 14;
const std::uint32_t beta = 24;
auto doTopDown = [&]() {
#pragma omp parallel for
for (std::uint32_t i = 0; i < f->n; ++i) pi[i + 1] = this->deg(f->d[i]);
prefixSumOmp(pi, f->n + 1);
nf->n = pi[f->n];
#pragma omp parallel for schedule(guided) reduction(+:m_f, n_f) reduction(-:mu)
for (std::uint32_t i = 0; i < f->n; ++i) {
std::uint32_t u = f->d[i], offset = pi[i];
for (std::uint32_t j = 0; j < this->deg(u); ++j) {
std::uint32_t v = this->g[this->o[u] + j];
if (!b->test(v) && dist[v] == -1u) {
b->set(v);
nf->d[offset + j] = v;
dist[v] = dist[u] + 1;
parent[v] = u;
m_f += this->deg(v);
mu -= this->deg(v);
++n_f;
} else {
nf->d[offset + j] = -1u;
}
}
}
f->cullFrom(*nf, cb);
};
auto doBottomUp = [&]() {
nb->clear();
#pragma omp parallel for schedule(guided) reduction(+:m_f, n_f) reduction(-:mu)
for (std::uint32_t v = 0; v < this->n; ++v) {
if (!b->test(v)) {
for (std::uint32_t i = this->o[v]; i < this->o[v + 1]; ++i) {
std::uint32_t u = this->g[i];
if (b->test(u)) {
nb->set(v);
dist[v] = dist[u] + 1;
parent[v] = u;
m_f += this->deg(v);
mu -= this->deg(v);
++n_f;
break;
}
}
}
}
*b |= *nb;
};
while (!(topdownStep && f->empty())) {
if (true) {
doTopDown();
} else {
doBottomUp();
}
if (topdownStep) {
if (m_f > mu / alpha) topdownStep = false;
} else {
if (n_f < this->n / beta) {
topdownStep = true;
f->clear();
for (std::uint32_t i = 0; i < nb->capacity(); ++i)
if (nb->test(i)) f->push(i);
}
topdownStep = true;
}
m_f = n_f = 0;
}
}
std::unique_ptr<Frontier> f{}, nf{}; // frontier and next frontier
std::unique_ptr<BitMap64> b{}, nb{};
std::uint32_t *pi{}; // index: prefix sum (degree)
std::uint32_t *cb{}; // cull index buffer
};
#endif |
maiorelemento.c | // compilar: make
// executar: make run tam=100
//
// original por: Profº. Dr. Paulo Sérgio Lopes de Souza
//
// modificado por:
// Gustavo T. Mastrobuono NUSP 10734411, Henrique de S. Q. dos Santos NUSP 10819029,
// Jhordan P. V. Pesantes NUSP 11733353, Witor M. A. de Oliveira NUSP 10692190 e Yorvin A. R. Carrion NUSP 11733332
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define T 4 // macro para definir o numero de threads utilizadas
int main(int argc, char **argv) {
  double wtime;  /* wall-clock time of the parallel fill + search */
  int *vetor, i, maior_global = 0, maior_local = 0, tam;
  if (argc != 2)
  {
    printf("Wrong arguments. Please use binary <amount_of_elements>\n");
    exit(0);
  }
  tam = atoi(argv[1]);
  /* Robustness: tam <= 0 would make vetor[tam / 2] below an out-of-bounds
     write on a zero-sized allocation. */
  if (tam <= 0) {
    printf("Amount of elements must be positive\n");
    exit(0);
  }
  printf("Amount of vetor=%d\n", tam);
  fflush(0);
  vetor = (int *)malloc(tam * sizeof(int)); /* Aloca o vetor da dimensão lida */
  if (vetor == NULL) { /* robustness: malloc can fail for a large tam */
    printf("Out of memory\n");
    exit(1);
  }
  int iteracoes = tam / T; /* iterations per thread */
  int resto = tam % T;     /* remainder when tam is not a multiple of T */
  omp_lock_t lock;
  omp_init_lock(&lock);
  vetor[tam / 2] = tam; /* plant the maximum in the middle of the vector */
  if (resto == 0) {
    wtime = omp_get_wtime(); /* start of the timed region */
    /* BUG FIX: maior_local was listed in private(), which leaves each
       thread's copy UNINITIALIZED; firstprivate copies the initial 0. */
    #pragma omp parallel num_threads(T) private(i) firstprivate(maior_local) shared(maior_global)
    {
      /* Fill this thread's contiguous chunk (skipping the planted maximum). */
      for (i = omp_get_thread_num() * iteracoes; i < iteracoes + omp_get_thread_num() * iteracoes; i++) {
        if (i != (tam / 2))
          vetor[i] = 1;
      }
      #pragma omp barrier /* wait for the whole vector to be filled */
      /* Search this thread's chunk for its local maximum. */
      for (i = omp_get_thread_num() * iteracoes; i < iteracoes + omp_get_thread_num() * iteracoes; i++) {
        if (vetor[i] > maior_local)
          maior_local = vetor[i];
      }
      /* Critical section: merge the local maxima into the global one. */
      omp_set_lock(&lock);
      if (maior_local > maior_global)
        maior_global = maior_local;
      omp_unset_lock(&lock);
    }
    wtime = omp_get_wtime() - wtime; /* end of the timed region */
  }
  else {
    wtime = omp_get_wtime();
    /* Same firstprivate fix as above. */
    #pragma omp parallel num_threads(T) private(i) firstprivate(maior_local) shared(maior_global)
    {
      if (omp_get_thread_num() == 0) {
        /* thread 0 fills the lower half */
        for (i = 0; i < (tam / 2); i++)
          vetor[i] = 1;
      }
      else if (omp_get_thread_num() == 1) {
        /* BUG FIX: previously EVERY non-zero thread wrote the same upper-half
           cells concurrently (a data race); one thread suffices. */
        for (i = (tam / 2) + 1; i < tam; i++)
          vetor[i] = 1;
      }
      #pragma omp barrier /* wait for all threads to finish filling */
      if (omp_get_thread_num() == 0) {
        for (i = 0; i < (tam / 2); i++) {
          if (vetor[i] > maior_local)
            maior_local = vetor[i];
        }
      }
      else {
        /* Non-zero threads redundantly scan the upper half (read-only after
           the barrier, so race-free); result is unchanged. */
        for (i = (tam / 2); i < tam; i++) {
          if (vetor[i] > maior_local)
            maior_local = vetor[i];
        }
      }
      /* Critical section: merge the local maxima into the global one. */
      omp_set_lock(&lock);
      if (maior_local > maior_global)
        maior_global = maior_local;
      omp_unset_lock(&lock);
    }
    wtime = omp_get_wtime() - wtime;
  }
  omp_destroy_lock(&lock); /* BUG FIX: the lock was never destroyed */
  printf("PAR REDUCTION: Tam=%d, maior=%d, Elapsed wall clock time = %f \n", tam, maior_global, wtime);
  free(vetor); /* Desaloca o vetor lido */
  return 0;
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define MaxBezierCoordinates 4194304
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
static void
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  /*
    Allocate a DrawInfo structure and initialize it to default values.
  */
  DrawInfo *draw_info = (DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));

  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from a default-initialized DrawInfo; a NULL draw_info simply
    returns the defaults.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Deep-copy string members (CloneString allocates a fresh copy).
  */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  /*
    Scalars and embedded structs are copied by plain assignment.
  */
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /*
    Pattern images get their own clones so the two DrawInfo structures do
    not share ownership.
  */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is an array terminated by a value whose magnitude
        is below MagickEpsilon; x+2 entries are copied so the terminator
        comes along.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+2)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /*
        The struct assignment above copied the stops pointer; replace it
        with a freshly allocated copy of the stop array.
      */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): this error tag looks copied from the dash-pattern
           path above -- a gradient-stop allocation failure reports
           "UnableToAllocateDashPattern"; confirm the tag is intended. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  /*
    Mask images are cloned rather than shared.
  */
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  /* debug reflects the current logging configuration, not the source's. */
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  /*
    qsort() comparator ordering edges for a right-handed coordinate system:
    first by starting y, then starting x, then by the cross product of the
    two initial segments, and finally by the second point.
  */
  const PointInfo
    *p = ((const EdgeInfo *) p_edge)->points,
    *q = ((const EdgeInfo *) q_edge)->points;

  double
    cross;

  if ((p[0].y-q[0].y) < 0.0) return(-1);
  if ((p[0].y-q[0].y) > 0.0) return(1);
  if ((p[0].x-q[0].x) < 0.0) return(-1);
  if ((p[0].x-q[0].x) > 0.0) return(1);
  cross=(p[1].x-p[0].x)*(q[1].y-q[0].y)-(p[1].y-p[0].y)*(q[1].x-q[0].x);
  if (cross < 0.0) return(-1);
  if (cross > 0.0) return(1);
  if ((p[1].y-q[1].y) < 0.0) return(-1);
  if ((p[1].y-q[1].y) > 0.0) return(1);
  if ((p[1].x-q[1].x) < 0.0) return(-1);
  if ((p[1].x-q[1].x) > 0.0) return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  /*
    Reverse the point list in place by swapping elements from both ends
    toward the middle.
  */
  register ssize_t
    head,
    tail;

  for (head=0, tail=(ssize_t) number_points-1; head < tail; head++, tail--)
  {
    PointInfo swap = points[head];

    points[head]=points[tail];
    points[tail]=swap;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,         /* sign of the current monotonic run: -1, 0 (unset), +1 */
    next_direction;

  PointInfo
    point,
    *points;           /* point buffer of the edge being accumulated */

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;            /* x-bounds of the current edge; y set when flushed */

  register ssize_t
    i,
    n;                 /* points accumulated in the current edge */

  MagickBooleanType
    ghostline;

  size_t
    edge,              /* number of completed edges */
    number_edges,      /* capacity of the edges array */
    number_points;     /* capacity of the points buffer */

  /*
    Convert a path to the more efficient sorted rendering form.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  /* NOTE(review): the allocation-failure paths in this function return NULL
     without releasing memory already acquired (polygon_info, edges, points);
     confirm callers treat this as fatal. */
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /* Prime slot 0 with empty placeholder values; it is overwritten when the
     first edge is flushed. */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush the edge in progress (if it has at least two
          points) and start a fresh subpath at this point.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* grow the edge table geometrically */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            /* edges are stored top-to-bottom: reverse upward runs */
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to: determine whether this segment continues the current
      monotonic (in y, then x) run or reverses it.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the direction flipped, so flush the accumulated run and
          start a new edge that begins at the run's last point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* grow the point buffer geometrically */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    /* track the x extent incrementally; y bounds come from the endpoints */
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /*
        Flush the final edge, or discard a degenerate (< 2 point) run.
      */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* sort edges for the scanline renderer (see DrawCompareEdges) */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  /*
    Write each path component (point and code) to the drawing event log.
  */
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *code_name;

    switch (p->code)
    {
      case GhostlineCode: code_name="moveto ghostline"; break;
      case OpenCode: code_name="moveto open"; break;
      case MoveToCode: code_name="moveto"; break;
      case LineToCode: code_name="lineto"; break;
      default: code_name="?"; break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,code_name);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,                 /* first point of the current subpath */
    q;                 /* previous point emitted (for duplicate elision) */

  register ssize_t
    i,
    n;                 /* number of path components emitted */

  ssize_t
    coordinates,       /* coordinates remaining in the current subpath */
    start;             /* index of the current subpath's first component */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* these primitives carry no traceable path */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* count the primitives; each can emit up to 3 path components (the point
     plus a ghostline pair for open subpaths), plus the end marker */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: rewrite the
      subpath's MoveTo as OpenCode and append a ghostline segment back to
      the starting point p.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  /* terminate the path with an end marker */
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Deallocate memory associated with a DrawInfo structure and return NULL.

    BUG FIX: the asserts previously ran AFTER draw_info->debug was
    dereferenced, so a NULL argument crashed before the NULL assert could
    fire; validate the pointer and signature before any use.
  */
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Release every owned member: strings, pattern/mask images, the dash
    pattern array, and the gradient stops.
  */
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* invalidate the signature so use-after-free is detectable */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  /*
    Release the edge's point list, then close the gap it leaves in the
    edge table; returns the number of edges remaining.
  */
  size_t
    remaining;

  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  remaining=--polygon_info->number_edges;
  if (edge < remaining)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (remaining-edge)*sizeof(*polygon_info->edges));
  return(remaining);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  /*
    Free every edge's point list, then the edge array, then the polygon
    structure itself; always returns NULL for caller-side assignment.
  */
  register ssize_t
    edge;

  for (edge=0; edge < (ssize_t) polygon_info->number_edges; edge++)
    polygon_info->edges[edge].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[edge].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AffineEdge() clips the horizontal span 'edge' on scanline y against the
  source image rectangle as seen through the (inverse) affine transform:
  each of the four image borders maps to a line in destination space, and
  the span is narrowed to the segment between them.  An empty result is
  signalled by returning a span with x2 < x1 (the caller tests this).
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;
  register double
    x;
  SegmentInfo
    inverse_edge;
  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* Horizontal source coordinate at span start: z + sx*x. */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      /* sx > 0: left border bounds x from below, right border from above. */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* sx < 0: borders swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: the whole span maps to one column; reject if outside. */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /*
            NOTE(review): the horizontal reject branch above signals the
            empty span with edge->x1, this one uses edge->x2 — confirm the
            asymmetry is intentional (x2=edge->x2 leaves the span non-empty
            when x1 was not raised by the clamps above).
          */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  /*
    Invert the 2x3 affine transform: the 2x2 linear part is inverted via
    its determinant (PerceptibleReciprocal() guards a degenerate matrix),
    and the translation is mapped through the inverted linear part.
  */
  double
    reciprocal;

  AffineMatrix
    inverse;

  reciprocal=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse.sx=reciprocal*affine->sy;
  inverse.rx=reciprocal*(-affine->rx);
  inverse.ry=reciprocal*(-affine->ry);
  inverse.sy=reciprocal*affine->sx;
  inverse.tx=(-affine->tx)*inverse.sx-affine->ty*inverse.ry;
  inverse.ty=(-affine->tx)*inverse.rx-affine->ty*inverse.sy;
  return(inverse);
}
/*
  DrawAffineImage() composites 'source' over 'image' under the given affine
  transform: the source corners are mapped forward to find the destination
  bounding box, then each destination scanline is clipped (AffineEdge) and
  sampled back through the inverse transform.

  Fix vs. previous revision: the per-row local 'x_offset' was initialized
  and incremented but never read — removed.
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;
  CacheView
    *image_view,
    *source_view;
  MagickBooleanType
    status;
  PixelInfo
    zero;
  PointInfo
    extent[4],
    min,
    max;
  register ssize_t
    i;
  SegmentInfo
    edge;
  ssize_t
    start,
    stop,
    y;
  /*
    Determine bounding box: map the four source corners forward and take
    the min/max of the results.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: clamp the box to the destination and walk each
    scanline in parallel, compositing interpolated source pixels.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;
    PointInfo
      point;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    SegmentInfo
      inverse_edge;
    /* Clip this scanline against the source rectangle; x2 < x1 => empty. */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* Map the destination pixel back into source coordinates. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  /*
    Cap the stroke width at roughly the image diagonal (2*sqrt(2) times
    the larger dimension, plus epsilon) so absurd widths don't blow up
    downstream rendering.
  */
  double
    diagonal_cap;

  diagonal_cap=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,
    image->rows);
  return(MagickMin((double) draw_info->stroke_width,diagonal_cap));
}
/*
  DrawBoundingRectangles() is a debugging aid: it strokes the bounding
  rectangle of every polygon edge (red for one winding direction, green for
  the other) and finally the overall bounds in blue.  Returns MagickTrue on
  success; on any color-query or draw failure the clone is destroyed and the
  failing status returned.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;
  DrawInfo
    *clone_info;
  MagickBooleanType
    status;
  PointInfo
    end,
    resolution,
    start;
  PrimitiveInfo
    primitive_info[6];
  register ssize_t
    i;
  SegmentInfo
    bounds;
  ssize_t
    coordinates;
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* Transparent-black fill: only the stroke outlines are visible. */
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(status);
    }
  /* Default to 96 DPI unless the draw info carries a density geometry. */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;
      MagickStatusType
        flags;
      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* Half the effective (scaled, clamped) stroke width, in pixels. */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounds, expanded by mid and clamped to image. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* One rectangle per edge, colored by winding direction. */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        /* Terminate the primitive list before handing it to the renderer. */
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      /* An early break above means a failure: propagate it. */
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status);
        }
    }
  /* Overall bounds in blue. */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(status);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  /*
    Render the clip path stored as image artifact 'id' and install the
    rendered result as the image's write mask.  Returns MagickFalse when
    the artifact is absent or the mask cannot be drawn.
  */
  const char
    *path;

  Image
    *mask;

  MagickBooleanType
    status;

  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,exception);
  if (mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask,exception);
  mask=DestroyImage(mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;
  Image
    *clip_mask,
    *separate_mask;
  MagickStatusType
    status;
  /*
    Draw a clip path: render the MVG 'clip_path' in opaque white on a
    transparent canvas the size of 'image', then separate and negate the
    alpha channel so covered areas become the mask.  Returns the mask
    image, or NULL on allocation/negate failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /*
    NOTE(review): from here on 'status' is reassigned by several setup
    calls but only the RenderMVGContent()/NegateImage() results influence
    the outcome — intermediate failures are effectively best-effort.
  */
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* Render the path with white fill / transparent hairline stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  /* Drop any inherited clip mask to avoid recursive clipping. */
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,1,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* Convert coverage (alpha) into the actual grayscale mask. */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  /*
    Draw a mask path: the composite-mask twin of DrawClippingMask() —
    render 'mask_path' in white on a transparent canvas, then separate
    and negate the alpha channel into a grayscale mask.  Returns the mask
    image, or NULL on allocation/negate failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /* Intermediate setup statuses are best-effort, as in DrawClippingMask. */
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /* Render the path with white fill / transparent hairline stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,1,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* Convert coverage (alpha) into the actual grayscale mask. */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,           /* remaining length of the current dash/gap segment */
    maximum_length,   /* length of the current polygon edge */
    offset,           /* scaled dash offset still to be consumed */
    scale,            /* affine expansion factor applied to the pattern */
    total_length;     /* distance already covered along the current edge */
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  PrimitiveInfo
    *dash_polygon;    /* scratch polyline holding the dash being emitted */
  register double
    dx,
    dy;
  register ssize_t
    i;
  size_t
    number_vertices;
  ssize_t
    j,                /* next free slot in dash_polygon */
    n;                /* index into dash_pattern; odd n = gap, even = dash */
  /*
    Walk the polygon edge by edge, alternating dash and gap segments from
    draw_info->dash_pattern (honoring dash_offset), and stroke each
    completed dash via DrawStrokePolygon().
  */
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance through pattern entries until the
    offset is exhausted, leaving 'n' and 'length' mid-pattern.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    /* A zero-valued pattern entry wraps the pattern around. */
    if (fabs(length) < MagickEpsilon)
      {
        n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    /* Emit as many complete dash/gap segments as fit on this edge. */
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* End of a gap: start the next dash at this point. */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* End of a dash: close and stroke the accumulated polyline. */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unfinished segment over onto the next edge. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* Flush a dash still open at the end of the polygon. */
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Project pixel (x,y) onto the gradient and return its offset along it:
    for a linear gradient, the scalar projection onto the gradient vector;
    for a radial gradient, the (possibly rotated/elliptical) distance from
    the center.  Unknown gradient types yield 0.0.
  */
  if ((gradient->type == UndefinedGradient) ||
      (gradient->type == LinearGradient))
    {
      const SegmentInfo
        *gradient_vector;

      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        axis,
        delta;

      gradient_vector=(&gradient->gradient_vector);
      axis.x=gradient_vector->x2-gradient_vector->x1;
      axis.y=gradient_vector->y2-gradient_vector->y1;
      delta.x=(double) x-gradient_vector->x1;
      delta.y=(double) y-gradient_vector->y1;
      length=sqrt(delta.x*delta.x+delta.y*delta.y);
      gamma=sqrt(axis.x*axis.x+axis.y*axis.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=axis.x*delta.x+axis.y*delta.y;
      offset=gamma*scale*length;
      return(offset);
    }
  if (gradient->type == RadialGradient)
    {
      PointInfo
        radial;

      if (gradient->spread == RepeatSpread)
        {
          /* Repeat spread uses the raw Euclidean distance. */
          radial.x=(double) x-gradient->center.x;
          radial.y=(double) y-gradient->center.y;
          return(sqrt(radial.x*radial.x+radial.y*radial.y));
        }
      /* Rotate into the gradient frame and normalize by the radii. */
      radial.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      radial.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(radial.x*radial.x+radial.y*radial.y));
    }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  /*
    qsort() comparator ordering gradient stops by ascending offset.
    Offsets within MagickEpsilon of each other compare equal; that test
    now runs first so the ordering is symmetric — previously a > b
    returned 1 even when |a-b| <= MagickEpsilon while the mirrored
    comparison returned 0, an inconsistent ordering that qsort() does
    not permit.
  */
  const StopInfo
    *stop_1,
    *stop_2;
  stop_1=(const StopInfo *) x;
  stop_2=(const StopInfo *) y;
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  if (stop_1->offset > stop_2->offset)
    return(1);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  const GradientInfo
    *gradient;
  const SegmentInfo
    *gradient_vector;
  double
    length;
  MagickBooleanType
    status;
  PixelInfo
    zero;
  PointInfo
    point;
  RectangleInfo
    bounding_box;
  ssize_t
    y;
  /*
    Draw linear or radial gradient on image: for each pixel in the
    gradient's bounding box, compute its offset along the gradient
    (GetStopColorOffset), map it through the spread mode (pad, reflect,
    repeat), blend the two neighboring color stops, and composite the
    result over the existing pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* Stops must be sorted by offset for the blend interval search below. */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  /*
    NOTE(review): the loop bounds below treat bounding_box.width/height as
    exclusive end coordinates (x2/y2), not as extents — confirm against the
    producer of gradient->bounding_box.
  */
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;
    double
      alpha,
      offset;
    register Quantum
      *magick_restrict q;
    register ssize_t
      i,
      x;
    ssize_t
      j;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* Seed the offset at x=0; linear offsets are normalized by length. */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* Clamp offsets outside [0,1] to the first/last stop color. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Linear blend between the bracketing stops i-1 and i. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* Fold the offset back and forth across the [0,1] interval. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;
          double
            repeat;
          /* Tile the gradient; antialias the seam between repetitions. */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* At the seam, blend across first and last stops. */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CheckPrimitiveExtent() grows the shared primitive-info array so at least
  'pad' more entries (plus slack) fit beyond the current offset.  Returns
  MagickTrue when storage suffices; on allocation failure it records a
  resource-limit exception, installs a minimal one-entry array so callers
  can unwind safely, and returns MagickFalse.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  size_t
    extent;
  /*
    Check if there is enough storage for drawing primitives.
  */
  extent=(size_t) mvg_info->offset+pad+4096;
  if (extent <= *mvg_info->extent)
    return(MagickTrue);
  /*
    NOTE(review): the realloc result overwrites *primitive_info directly;
    presumably ResizeQuantumMemory() releases the original block on
    failure (as the error path below assumes) — confirm against the
    MagickCore memory API.
  */
  *mvg_info->primitive_info=ResizeQuantumMemory(*mvg_info->primitive_info,
    extent,sizeof(**mvg_info->primitive_info));
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    {
      *mvg_info->extent=extent;
      return(MagickTrue);
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=AcquireCriticalMemory(
    sizeof(**mvg_info->primitive_info));
  (void) memset(*mvg_info->primitive_info,0,sizeof(**mvg_info->primitive_info));
  *mvg_info->extent=1;
  return(MagickFalse);
}
/*
  GetMVGMacros() scans an MVG primitive stream for named macro definitions of
  the form:

    push graphic-context "name" ... pop graphic-context

  and returns a splay tree mapping each macro name to the MVG text found
  between the opening push and its matching pop.  Returns NULL when primitive
  is NULL; otherwise the caller owns the returned splay tree.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *token;
  const char
    *q;
  size_t
    extent;
  SplayTreeInfo
    *macros;
  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  token=AcquireString(primitive);
  /* token buffer is at least as long as the whole stream plus slack */
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;
        GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];
            const char
              *p;
            ssize_t
              n;
            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /* n tracks push/pop nesting; the macro body ends when the depth
               of the opening push returns to zero */
            n=1;
            for (p=q; *p != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* rewind so the "pop" keyword itself is excluded from the
                     macro body */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  char
                    *macro;
                  /*
                    Extract macro.
                  */
                  GetNextToken(p,&p,extent,token);
                  macro=AcquireString(start);
                  /* truncate at the matching pop; end > start was verified */
                  macro[end-start]='\0';
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  macro=DestroyString(macro);
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  return(macros);
}
/*
  IsPoint() returns MagickTrue if the string parses as a point coordinate.
  A token is rejected only when parsing consumed no characters and produced
  a (near) zero value.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *parse_end;
  double
    value;
  value=StringToDouble(point,&parse_end);
  if ((parse_end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  TracePoint() records a single-coordinate (point) primitive at the given
  location.
*/
static inline void TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(status);
}
primitive=(char *) NULL;
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
if ((strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-'))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=4096;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.offset=0;
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetNextToken(q,&q,MagickPathExtent,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (draw_info->compliance != SVGCompliance)
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
                GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (draw_info->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(draw_info->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MaxTextExtent];
const char
*clip_path;
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(name,MaxTextExtent,"%s",token);
clip_path=(const char *) GetValueFromSplayTree(macros,name);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,name,clip_path);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("mask",token) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
primitive_info[j].text=(char *) NULL;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,4096);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
TraceArc(&mvg_info,primitive_info[j].point,primitive_info[j+1].point,
primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (draw_info->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Public entry point for rendering an MVG primitive stream onto the image;
    delegate to the recursive renderer starting at depth 1.
  */
  status=RenderMVGContent(image,draw_info,1,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the rendered pattern image is returned in this parameter.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    Render the MVG path stored in the image artifact "<name>" into *pattern,
    a freshly allocated image whose size comes from the "<name>-geometry"
    artifact.  Returns MagickFalse when either artifact is absent or the
    pattern image cannot be allocated.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  assert(pattern != (Image **) NULL);
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (*pattern == (Image *) NULL)
    return(MagickFalse);  /* allocation failed; avoid NULL dereference below */
  /*
    Start the pattern canvas fully transparent so only rendered content shows.
  */
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  /*
    The optional "<name>-type" artifact selects a gradient type override.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,1,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    n;

  ssize_t
    number_threads;

  /*
    Release every per-thread polygon rasterization state and then the table
    itself.  Always returns NULL so callers can write
    polygon_info=DestroyPolygonThreadSet(polygon_info).
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (n=0; n < number_threads; n++)
  {
    if (polygon_info[n] == (PolygonInfo *) NULL)
      continue;
    polygon_info[n]=DestroyPolygonInfo(polygon_info[n]);
  }
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.

    Returns the fill coverage in [0,1] and stores the stroke coverage in
    *stroke_alpha.  `mid` is half the (affine-scaled) stroke width; distances
    below are squared distances from (x,y) to the nearest edge segment.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /*
      Edges appear in increasing y order, so once this scanline lies above an
      edge's bounding box (with the stroke margin) no later edge can match.
      NOTE(review): relies on the ordering produced by ConvertPathToPolygon --
      confirm if modifying that function.
    */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* scanline is permanently past this edge; free its point list */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* resume at the segment cached for the previous pixel on this row */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* first hit on a new scanline: remember where to resume */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.

        Classic point-to-segment test: beta is the projection of (x,y) onto
        the segment direction; <=0 means nearest to the start point, >=|d|^2
        means nearest to the end point, otherwise use the perpendicular
        distance to the infinite line.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* inside (mid-0.5) -> fully stroked; between -> partial coverage */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          /* beta still 0 when the stroke branch above was skipped */
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.

    Count signed crossings of edges to the right of (x,y); the fill rule then
    decides interior-ness (even-odd vs. non-zero).
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* edge lies entirely left of (x,y): unconditional crossing */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross-product sign test: is (x,y) left of the directed segment? */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  /*
    Rasterize a polygon or line primitive: build per-thread edge tables,
    clip the primitive's bounding box (expanded by the stroke width) to the
    image, then composite fill and stroke coverage row by row, parallelized
    over scanlines with OpenMP.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  /*
    Deliberately disabled debug aid: renders the primitive's bounding
    rectangles when the `if (0)` is flipped on during development.
  */
  DisableMSCWarning(4127)
  if (0)
    {
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid = half the stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* union of all edge bounding boxes */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* expand by the stroke half-width plus one pixel of slack */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the box to the image frame */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.

        NOTE(review): coordinates == 1 cannot occur here (filtered by the
        <= 1 early return above); this branch is reached when the edge table
        is empty.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel matching the primitive origin is painted */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.

        Each thread queries its own edge table (polygon_info[id]) so the
        highwater/scanline caches inside GetFillAlpha stay thread-local.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* hard threshold instead of smooth coverage when antialias is off */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  /*
    Emit a DrawEvent trace of the primitive: simple primitives log a single
    line; path-like primitives log every vertex, flagging duplicates and
    whether each subpath ends open or closed (last point == first point).
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  coordinates=0;
  p=primitive_info[0].point;
  /* q = previously logged point; start with an impossible coordinate */
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* a new subpath begins; remember its first point in p */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    begin open (%.20g)",(double) coordinates);
        p=point;
      }
    /* duplicate assignment of `point` removed here (was assigned twice) */
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end open (%.20g)",
        (double) coordinates);
  }
}
/*
  DrawPrimitive() renders a single graphic primitive onto the image.  Simple
  primitives (alpha, color, image, point, text) are handled directly here;
  every other primitive type falls through to the polygon rasterizer in the
  default case.  Returns MagickTrue only if every drawing step succeeded.
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "  begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "    affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /*
    A grayscale image cannot represent a non-gray fill or stroke; promote
    the image to sRGB first.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /*
    Round the primitive origin to the nearest pixel center.
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          /*
            Set the alpha of the single pixel at (x,y).
          */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          /*
            Set the alpha of every pixel that fuzzily matches the pixel at
            (x,y).  Note the scan below reuses x and y as loop indices.
          */
          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          /*
            Flood-fill alpha from (x,y); FillToBorder fills until the border
            color is reached rather than matching the seed color.
          */
          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          /*
            Set the alpha of every pixel in the image.
          */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          /*
            Set the color of the single pixel at (x,y).
          */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          /*
            Recolor every pixel that fuzzily matches the pixel at (x,y).
          */
          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          /*
            Flood-fill color from (x,y); see the alpha variant above.
          */
          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          /*
            Recolor every pixel in the image.
          */
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      /*
        The text member carries the image source: either inline "data:"
        content or a filename.
      */
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MagickPathExtent);
          composite_images=ReadImage(clone_info,exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=0;
          break;
        }
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      status&=DrawAffineImage(image,composite_image,&affine,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      register Quantum
        *q;

      /*
        Composite the fill color over the single pixel at (x,y); out of
        bounds points are silently ignored.
      */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      /*
        Delegate to the annotation engine at the primitive's origin.
      */
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          /*
            Accumulate (&=) rather than overwrite status so earlier failures
            (e.g. mask setup above) are not silently discarded.
          */
          status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
               (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /*
                Accumulate rather than overwrite status (see above).
              */
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or exceptions in this structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  const double
    nudge = 2.0*MagickEpsilon;

  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  /*
    Build a vanishingly small four-corner polygon around the endpoint; when
    rasterized with round joins it appears as a circular line cap.  The
    fifth entry terminates the primitive list.
  */
  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=nudge;
  linecap[2].point.x+=nudge;
  linecap[2].point.y+=nudge;
  linecap[3].point.y+=nudge;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
/*
  DrawStrokePolygon() strokes each subpath of primitive_info by tracing an
  outline polygon around it and filling that outline with the stroke color;
  open subpaths with round caps get an explicit cap at each end.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    begin draw-stroke-polygon");
  /*
    The traced outline is filled, not stroked: clone the draw info, make
    the fill be the stroke color/pattern, and disable stroking on the clone.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* walk the primitive list one subpath (coordinates entries) at a time */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a lone point has no stroke outline */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;  /* last point of this subpath */
    closed_path=p->closed_subpath;
    /*
      Open subpaths drawn with round caps get a cap at both endpoints.
    */
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset the matrix to the identity transform: unit scale, no rotation
    or shear, no translation.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  affine_matrix->sx=1.0;
  affine_matrix->rx=0.0;
  affine_matrix->ry=0.0;
  affine_matrix->sy=1.0;
  affine_matrix->tx=0.0;
  affine_matrix->ty=0.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
/*
  GetDrawInfo() zeroes draw_info, installs built-in defaults, then overrides
  them with settings and "option" table entries inherited from image_info.
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* defaults: opaque black fill, fully transparent (no) stroke */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit settings carried directly on the image info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Override defaults from the image-info option table.  Numeric parses via
    StringToDouble are best-effort: next_token is not checked for errors.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* weight may be a keyword (e.g. "bold") or a raw numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the binomial coefficient choose(n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
/*
  Compute the binomial coefficient choose(n,k) = n!/(k!*(n-k)!), evaluated
  as (product of k+1..n) / (product of 1..n-k) to keep intermediates small.
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  ssize_t
    j;

  result=1.0;
  for (j=k+1; j <= n; j++)
    result*=(double) j;
  for (j=1; j <= (n-k); j++)
    result/=(double) j;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static void TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  /*
    An MVG arc is traced as an ellipse centered at the midpoint of the
    start/end segment, with radii reaching from that center to the start
    point, swept over the requested degree range.
  */
  center.x=0.5*(start.x+end.x);
  center.y=0.5*(start.y+end.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  TraceEllipse(mvg_info,center,radius,degrees);
}
/*
  TraceArcPath() converts an SVG-style elliptical arc given in endpoint
  parameterization (start/end points, radii, x-axis rotation angle,
  large-arc and sweep flags) into a run of cubic Bezier segments appended
  at the current mvg_info offset.
*/
static void TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Degenerate cases per the SVG arc rules: coincident endpoints collapse
    to a point; a (near) zero radius degrades to a straight line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      TracePoint(primitive_info,end);
      return;
    }
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /*
    Work in the axis-aligned frame of the rotated ellipse.
  */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  if (delta > 1.0)
    {
      /*
        Radii too small to span the endpoints: scale them up uniformly.
      */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      /* the large-arc/sweep flag pair selects which of the two candidate
         centers to use */
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /*
    alpha is the start angle, theta the signed sweep; force theta's sign
    to agree with the sweep flag.
  */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    Emit one cubic Bezier per (at most) quarter circle of sweep.
  */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+MagickEpsilon))));
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    /*
      gamma is the Bezier control-point distance approximating a circular
      sub-arc of half-angle beta.
    */
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Map the four control points back through the ellipse rotation, then
      let TraceBezier flatten the segment into point primitives.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* pin the final segment exactly onto the endpoint */
    TraceBezier(mvg_info,4);
    /* TraceBezier may reallocate the buffer; re-resolve p before advancing */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  /*
    Rewind to the subpath start and stamp the total coordinate count and
    the primitive type over every generated point (walking backwards).
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  TraceBezier() flattens the Bezier curve whose control points sit at the
  current mvg_info offset into a polyline of point primitives, by direct
  Bernstein-polynomial evaluation at evenly spaced parameter values.
*/
static void TraceBezier(MVGInfo *mvg_info,const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Estimate the sampling density from the largest coordinate spread among
    the control points, capped at BezierQuantum samples per control point.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  /* ensure the primitive buffer can hold every generated sample */
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    return;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  /* binomial coefficients of the Bernstein basis */
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    /* evaluate sum over j of B(j,n-1)(weight) * control_point(j) */
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    TracePoint(p,points[i]);
    p+=p->coordinates;
  }
  /* finish exactly on the last control point */
  TracePoint(p,end);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* stamp the primitive type over every generated point, walking backwards */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
}
static void TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    dx,
    dy,
    radius;

  PointInfo
    degrees,
    offset;

  /*
    A circle is an ellipse with equal radii swept through a full 360
    degrees; the radius is the distance from the center (start) to the
    perimeter point (end).
  */
  dx=end.x-start.x;
  dy=end.y-start.y;
  radius=hypot(dx,dy);
  offset.x=radius;
  offset.y=radius;
  degrees.x=0.0;
  degrees.y=360.0;
  TraceEllipse(mvg_info,start,offset,degrees);
}
/*
  TraceEllipse() appends a polyline approximation of an (arc of an) ellipse
  centered at `center' with the given radii, swept from arc.x to arc.y
  degrees, at the current mvg_info offset.
*/
static void TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    extent;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return;  /* a degenerate radius produces nothing */
  /*
    Pick an angular step small enough to keep segments short relative to
    the larger radius, capped at pi/8 for small ellipses.
  */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/(4.0*(MagickPI*PerceptibleReciprocal(delta)/2.0));
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;  /* normalize so the sweep is non-negative */
  angle.y=DegreesToRadians(y);
  extent=(size_t) ceil((angle.y-angle.x)/step)+1;
  if (CheckPrimitiveExtent(mvg_info,extent) == MagickFalse)
    return;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    TracePoint(p,point);
    p+=p->coordinates;
  }
  /* land exactly on the terminal angle */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  TracePoint(p,point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* a full sweep ends where it began: mark the subpath closed */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* stamp the primitive type over every generated point, walking backwards */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  /*
    Emit a two-point line segment; a degenerate segment whose endpoints
    coincide within epsilon collapses to a single point primitive.
  */
  TracePoint(primitive_info,start);
  if ((fabs(start.x-end.x) >= MagickEpsilon) ||
      (fabs(start.y-end.y) >= MagickEpsilon))
    {
      TracePoint(primitive_info+1,end);
      (primitive_info+1)->primitive=primitive_info->primitive;
      primitive_info->coordinates=2;
      primitive_info->closed_subpath=MagickFalse;
      return;
    }
  primitive_info->primitive=PointPrimitive;
  primitive_info->coordinates=1;
}
/*
  TracePath() parses an SVG-style path string (moveto, lineto, horizontal/
  vertical lineto, cubic and quadratic Bezier curves with smooth variants,
  elliptical arcs, and closepath) and appends the traced coordinates to the
  primitive array referenced by mvg_info.  It returns the number of
  coordinates traced, or 0 on a parse error or a primitive-extent
  (re)allocation failure.

  Fix: the Bezier cases (C/c, Q/q, S/s, T/t) previously stored their control
  points through `q+i` without first guaranteeing capacity for those entries
  and without refreshing `q` after a possible reallocation by an earlier
  CheckPrimitiveExtent() call -- a heap-overflow / stale-pointer hazard.
  Each of those cases now reserves the control-point slots and re-derives
  `q` from the (possibly moved) primitive buffer before writing.
*/
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    /* status is cleared by ThrowPointExpectedException() on a parse error */
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          /* uppercase attribute is absolute, lowercase is relative */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          /* reserve room for the 4 control points and refresh q in case an
             earlier CheckPrimitiveExtent() reallocated the buffer */
          if (CheckPrimitiveExtent(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,4);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: close out the running subpath, then start a new one.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* 'Z' closes back to this point */
          i++;
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          /* reserve room for the 3 control points and refresh q in case an
             earlier CheckPrimitiveExtent() reallocated the buffer */
          if (CheckPrimitiveExtent(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,3);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* per SVG, no reflection if the previous command was not a cubic */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          /* reserve room for the 4 control points and refresh q in case an
             earlier CheckPrimitiveExtent() reallocated the buffer */
          if (CheckPrimitiveExtent(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,4);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the reflection
          of the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* per SVG, no reflection if the previous command was not quadratic */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          /* reserve room for the 3 control points and refresh q in case an
             earlier CheckPrimitiveExtent() reallocated the buffer */
          if (CheckPrimitiveExtent(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,3);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: trace back to the subpath start and seal it.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        TracePoint(q,point);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /* close out the final (possibly open) subpath */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* back-fill the primitive type; multiple closed subpaths switch the fill
     method so holes render correctly */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
/*
  TraceRectangle() traces the closed outline of the axis-aligned rectangle
  spanned by start/end into primitive_info.  A rectangle with (near) zero
  width or height yields an empty primitive (coordinates == 0).
*/
static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  PointInfo
    corner[5];

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  if ((fabs(start.x-end.x) < MagickEpsilon) ||
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      /* degenerate rectangle: nothing to trace */
      primitive_info->coordinates=0;
      return;
    }
  /* walk the four corners and return to the start to close the outline */
  corner[0]=start;
  corner[1].x=start.x;
  corner[1].y=end.y;
  corner[2]=end;
  corner[3].x=end.x;
  corner[3].y=start.y;
  corner[4]=start;
  p=primitive_info;
  for (i=0; i < 5; i++)
  {
    TracePoint(p,corner[i]);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* back-fill the primitive type over the traced coordinates */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  TraceRoundRectangle() traces a rectangle with elliptically-rounded corners
  into the primitive array referenced by mvg_info.  The corner radii (arc)
  are clamped to half the rectangle's width/height; a degenerate rectangle
  (zero width or height) produces an empty primitive.
*/
static void TraceRoundRectangle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  /* remember where this primitive starts so the path can be closed below */
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return;
    }
  /* clamp the corner radii so opposite corner arcs cannot overlap */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* trace the four corner arcs, one 90-degree quadrant each; after every
     TraceEllipse() the buffer may have moved, so p is re-derived and the
     offset advanced past the arc just traced */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
    return;
  /* close the path back to the first traced point */
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  TracePoint(p,(*mvg_info->primitive_info+offset)->point);
  p+=p->coordinates;
  /* rewind: the whole rounded rectangle is a single primitive at `offset` */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* back-fill the primitive type over the traced coordinates */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  TraceSquareLinecap() extends the first and last vertices of an open stroked
  path outward by `offset` along the path direction, producing a square line
  cap.

  Fixes: the extension divided by `distance`, which is zero whenever every
  vertex coincides with the endpoint -- a division by zero yielding NaN/Inf
  coordinates.  PerceptibleReciprocal() (used elsewhere in this file) makes
  the division safe.  The backward scan could also walk j below 0 in the
  all-coincident case, reading primitive_info[-1]; j is now clamped.
*/
static void TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /*
    Find the first vertex measurably distant from the start point; it
    defines the outgoing direction of the path.
  */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  /* PerceptibleReciprocal() avoids division by zero when distance == 0 */
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)*PerceptibleReciprocal(distance));
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)*PerceptibleReciprocal(distance));
  /*
    Likewise for the end of the path: scan backward for the last vertex
    measurably distant from the final point.
  */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (j < 0)
    j=0;  /* all vertices coincide: avoid reading primitive_info[-1] */
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)*PerceptibleReciprocal(distance));
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)*PerceptibleReciprocal(distance));
}
/*
  TraceStrokePolygon() converts the polyline in primitive_info into a closed
  polygon outlining the stroked path: it offsets each segment by half the
  stroke width on both sides (paths p and q), joins consecutive segments
  per draw_info->linejoin (bevel, miter, round), and optionally caps the
  ends.  Returns a newly allocated PrimitiveInfo polygon the caller must
  relinquish, or NULL on a memory failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/* Grow (or fail out of) the two offset-path buffers; on allocation failure
   all working memory is released and the function returns NULL. */
#define CheckPathExtent(pad) \
  if ((q+(pad)) >= (ssize_t) max_strokes) \
    { \
      if (~max_strokes < (pad)) \
        { \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        } \
      else \
        { \
          max_strokes+=(pad); \
          path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
            sizeof(*path_p)); \
          path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
            sizeof(*path_q)); \
        } \
      if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
        { \
          if (path_p != (PointInfo *) NULL) \
            path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          if (path_q != (PointInfo *) NULL) \
            path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
          polygon_primitive=(PrimitiveInfo *) \
            RelinquishMagickMemory(polygon_primitive); \
          return((PrimitiveInfo *) NULL); \
        } \
    }

  /* per-segment pair of values: .p is the previous segment, .q the current */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  /* for closed paths with round/miter joins, repeat the second vertex so
     the closing corner is joined like every other corner */
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* near-vertical and near-horizontal segments get clamped slopes of
     +/-1/MagickEpsilon instead of dividing by ~0 */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid is half the stroke width in device space; miterlimit is compared
     against squared distances below */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  /* seed box_p/box_q with the first segment offset to either side */
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* skip segments shorter than half a pixel */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_[pq][4] is the intersection of the two offset segments; for
       (near) parallel segments fall back to the shared endpoint */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* sign of the cross product selects which side is the outer corner */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* approximate the round join with arc_segments chords of radius
             mid around the shared vertex */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* rotate: the current segment becomes the previous one */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: path_p forward, then path_q in reverse, closing
    back to the first point.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
GB_unop__ainv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_int8_int8)
// op(A') function: GB (_unop_tran__ainv_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the AINV (additive inverse) operator, int8 -> int8
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__ainv_int8_int8)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (int8_t) (-Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // negate only the entries present in the bitmap
            if (!Ab [p]) continue ;
            Cx [p] = (int8_t) (-Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the AINV operator.
// The actual loop body lives in the shared template GB_unop_transpose.c,
// which is specialized here via the GB_* macros defined above.
GrB_Info GB (_unop_tran__ainv_int8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "MagickCore/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
% size_t analyzeImage(Image *images,const int argc,
% char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
  const char **argv,ExceptionInfo *exception)
{
  char
    text[MagickPathExtent];

  double
    area,
    brightness_mean,
    brightness_standard_deviation,
    brightness_kurtosis,
    brightness_skewness,
    brightness_sum_x,
    brightness_sum_x2,
    brightness_sum_x3,
    brightness_sum_x4,
    saturation_mean,
    saturation_standard_deviation,
    saturation_kurtosis,
    saturation_skewness,
    saturation_sum_x,
    saturation_sum_x2,
    saturation_sum_x3,
    saturation_sum_x4;

  Image
    *image;

  /*
    Compute the brightness and saturation mean, standard deviation, excess
    kurtosis and skewness of every frame in the image list and store them as
    "filter:brightness:*" / "filter:saturation:*" image properties.  argc and
    argv are part of the module-filter ABI but are unused here.  Returns
    MagickImageFilterSignature per the filter-module convention.
  */
  assert(images != (Image **) NULL);
  assert(*images != (Image *) NULL);
  assert((*images)->signature == MagickCoreSignature);
  (void) argc;
  (void) argv;
  image=(*images);
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
  {
    CacheView
      *image_view;

    MagickBooleanType
      status;

    ssize_t
      y;

    brightness_sum_x=0.0;
    brightness_sum_x2=0.0;
    brightness_sum_x3=0.0;
    brightness_sum_x4=0.0;
    brightness_mean=0.0;
    brightness_standard_deviation=0.0;
    brightness_kurtosis=0.0;
    brightness_skewness=0.0;
    saturation_sum_x=0.0;
    saturation_sum_x2=0.0;
    saturation_sum_x3=0.0;
    saturation_sum_x4=0.0;
    saturation_mean=0.0;
    saturation_standard_deviation=0.0;
    saturation_kurtosis=0.0;
    saturation_skewness=0.0;
    area=0.0;
    status=MagickTrue;
    image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    /*
      The moment sums and the pixel count are combined with OpenMP
      reductions: the previous unsynchronized "+=" updates of these shared
      accumulators were a data race yielding nondeterministic statistics.
    */
    #pragma omp parallel for schedule(static) shared(status) \
      magick_number_threads(image,image,image->rows,1) \
      reduction(+:area,brightness_sum_x,brightness_sum_x2,brightness_sum_x3, \
        brightness_sum_x4,saturation_sum_x,saturation_sum_x2, \
        saturation_sum_x3,saturation_sum_x4)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          brightness,
          hue,
          saturation;

        /*
          hue/saturation/brightness are per-iteration locals so every thread
          owns its copy (they were shared function-scope variables, and thus
          racy, before).  hue is computed but unused by this filter.
        */
        ConvertRGBToHSL(GetPixelRed(image,p),GetPixelGreen(image,p),
          GetPixelBlue(image,p),&hue,&saturation,&brightness);
        brightness*=QuantumRange;
        brightness_sum_x+=brightness;
        brightness_sum_x2+=brightness*brightness;
        brightness_sum_x3+=brightness*brightness*brightness;
        brightness_sum_x4+=brightness*brightness*brightness*brightness;
        saturation*=QuantumRange;
        saturation_sum_x+=saturation;
        saturation_sum_x2+=saturation*saturation;
        saturation_sum_x3+=saturation*saturation*saturation;
        saturation_sum_x4+=saturation*saturation*saturation*saturation;
        area++;
        p+=GetPixelChannels(image);
      }
    }
    image_view=DestroyCacheView(image_view);
    /*
      Skip an unreadable or empty frame but keep analyzing the rest of the
      list (the old "break" silently abandoned all subsequent frames).
    */
    if ((status == MagickFalse) || (area <= 0.0))
      continue;
    brightness_mean=brightness_sum_x/area;
    (void) FormatLocaleString(text,MagickPathExtent,"%g",brightness_mean);
    (void) SetImageProperty(image,"filter:brightness:mean",text,
      exception);
    brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
      area*brightness_sum_x/area));
    (void) FormatLocaleString(text,MagickPathExtent,"%g",
      brightness_standard_deviation);
    (void) SetImageProperty(image,"filter:brightness:standard-deviation",text,
      exception);
    /*
      Kurtosis and skewness are undefined for a constant channel; report the
      initialized 0.0 when the standard deviation is effectively zero.  The
      guard is fabs(...) >= MagickEpsilon throughout (the brightness skewness
      previously used an inconsistent "!= 0" comparison).
    */
    if (fabs(brightness_standard_deviation) >= MagickEpsilon)
      brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
        brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
        brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
        brightness_mean*brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation*
        brightness_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MagickPathExtent,"%g",brightness_kurtosis);
    (void) SetImageProperty(image,"filter:brightness:kurtosis",text,
      exception);
    if (fabs(brightness_standard_deviation) >= MagickEpsilon)
      brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
        brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
        brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation);
    (void) FormatLocaleString(text,MagickPathExtent,"%g",brightness_skewness);
    (void) SetImageProperty(image,"filter:brightness:skewness",text,
      exception);
    saturation_mean=saturation_sum_x/area;
    (void) FormatLocaleString(text,MagickPathExtent,"%g",saturation_mean);
    (void) SetImageProperty(image,"filter:saturation:mean",text,
      exception);
    saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
      area*saturation_sum_x/area));
    (void) FormatLocaleString(text,MagickPathExtent,"%g",
      saturation_standard_deviation);
    (void) SetImageProperty(image,"filter:saturation:standard-deviation",text,
      exception);
    if (fabs(saturation_standard_deviation) >= MagickEpsilon)
      saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
        saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
        saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
        saturation_mean*saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation*
        saturation_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MagickPathExtent,"%g",saturation_kurtosis);
    (void) SetImageProperty(image,"filter:saturation:kurtosis",text,
      exception);
    if (fabs(saturation_standard_deviation) >= MagickEpsilon)
      saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
        saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
        saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation);
    (void) FormatLocaleString(text,MagickPathExtent,"%g",saturation_skewness);
    (void) SetImageProperty(image,"filter:saturation:skewness",text,
      exception);
  }
  return(MagickImageFilterSignature);
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// Computes res += alpha * lhs * rhs where res is row-major, by delegating to
// the column-major kernel on the mathematically identical transposed product
// res^T += alpha * rhs^T * lhs^T: operands are swapped, their storage orders
// flipped, and the rows/cols arguments exchanged.
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor,ResInnerStride>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// Computes res += alpha * lhs * rhs (res column-major) using cache blocking:
// panels of lhs/rhs are packed into contiguous buffers (blockA/blockB) and
// multiplied by the gebp micro-kernel.  When `info` is non-null this runs as
// one worker of an OpenMP team and lhs packing is shared across threads.
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper;
LhsMapper lhs(_lhs, lhsStride);
RhsMapper rhs(_rhs, rhsStride);
ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
int tid = omp_get_thread_num();
int threads = omp_get_num_threads();
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
// NOTE(review): busy-wait by design; correctness relies on the matching
// "#pragma omp atomic" decrement of users at the end of this k-iteration.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(int shift=0; shift<threads; ++shift)
{
int i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
// (only the first nc-wide panel of B was shared above; each thread packs
// the rest of its own columns privately).
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#pragma omp atomic
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
// Buffers come from `blocking` when pre-allocated, else from the stack.
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  typedef typename Gemm::Traits Traits;

  // Bundles the operands, destination, scaling factor and blocking state so
  // that parallelize_gemm can invoke the product, either once (sequential
  // path) or once per worker on a row-slice of the destination.
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Set up the shared blocking sizes and the shared lhs packing buffer
  // before the parallel region starts.
  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // Run the product on the [row, row+rows) x [col, col+cols) slice of the
  // destination; cols==-1 is a sentinel meaning "all remaining columns".
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    const Index actual_cols = (cols==-1) ? m_rhs.cols() : cols;
    Gemm::run(rows, actual_cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

protected:
  const Lhs& m_lhs;
  const Rhs& m_rhs;
  Dest& m_dest;
  Scalar m_actualAlpha;
  BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    // Packed-panel buffers; null until a derived blocking class provides
    // storage (static buffers or lazy aligned allocation).
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    // Cache-blocking sizes along the M, N and K dimensions of the product.
    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:
    // Everything starts out empty; derived constructors fill in the sizes.
    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index kc() const { return m_kc; }
    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
// All dimensions are fixed at compile time, so the packing buffers can be
// static members: no heap allocation ever happens for this specialization.
// A row-major destination is handled by swapping the roles of lhs/rhs
// (Transpose), mirroring the RowMajor product specialization.
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
// Static alignment not available: over-allocate raw bytes and round the
// pointer up manually in the constructor below.
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
// Round the raw byte buffers up to the next EIGEN_DEFAULT_ALIGN_BYTES boundary.
this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
// Allocation hooks are no-ops here: the buffers are static members.
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
// Runtime-sized specialization: buffer sizes are computed from the actual
// problem dimensions and the packing buffers are heap-allocated lazily.
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
// Deliberately pass a local copy so only kc and mc are shrunk to
// cache-friendly sizes while m_nc keeps the full column count.
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Recompute the blocking for a parallel run; must happen before any buffer
// has been allocated (asserted below).
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
// As above: pass a copy of mc so the stored m_mc is left untouched.
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Lazy, idempotent allocation of the packed-panel buffers.
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
// aligned_delete on a null pointer is expected to be a no-op, so buffers
// that were never allocated are safe to "free" here.
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
// dst = lhs * rhs.  Very small products (sum of the three dimensions < 20)
// are routed to the lazy coefficient-based evaluator, which beats the
// blocked GEMM's packing overhead at that scale; rhs.rows()>0 guards the
// degenerate empty-depth case.
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
// dst += lhs * rhs, same small-product heuristic as evalTo.
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
// dst -= lhs * rhs, implemented as scaleAndAddTo with alpha = -1.
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
// dst += alpha * lhs * rhs: peels scalar factors/conjugation off the
// operands via blas_traits, picks the matching product kernel and blocking
// policy, and hands everything to parallelize_gemm.
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
// Fold any scalar multiples wrapped around the operands into alpha.
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,
Dest::InnerStrideAtCompileTime>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
// Only bother parallelizing when the destination can be large.
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
gimple.h | /* Gimple IR definitions.
Copyright (C) 2007-2014 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_GIMPLE_H
#define GCC_GIMPLE_H
/* A GIMPLE sequence node is simply a statement: statements are chained
   through the next/prev pointers embedded in gimple_statement_base.  */
typedef gimple gimple_seq_node;
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
typedef vec<gimple> gimple_vec;
/* Codes identifying each kind of GIMPLE statement.  The enumerators are
   generated from gimple.def via the DEFGSCODE macro expansion below.  */
enum gimple_code {
#define DEFGSCODE(SYM, STRING, STRUCT) SYM,
#include "gimple.def"
#undef DEFGSCODE
LAST_AND_UNUSED_GIMPLE_CODE
};
/* Printable names and RHS classifications indexed by gimple_code; the
   tables themselves are defined elsewhere (not in this header).  */
extern const char *const gimple_code_name[];
extern const unsigned char gimple_rhs_class_table[];
/* Error out if a gimple tuple is addressed incorrectly. */
#if defined ENABLE_GIMPLE_CHECKING
#define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR)
extern void gimple_check_failed (const_gimple, const char *, int, \
const char *, enum gimple_code, \
enum tree_code) ATTRIBUTE_NORETURN;
/* Abort with a diagnostic unless statement GS has code CODE.  GS is
   evaluated exactly once.  */
#define GIMPLE_CHECK(GS, CODE) \
do { \
const_gimple __gs = (GS); \
if (gimple_code (__gs) != (CODE)) \
gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \
(CODE), ERROR_MARK); \
} while (0)
#else /* not ENABLE_GIMPLE_CHECKING */
/* Checking disabled: EXPR is still parsed (so it must stay compilable)
   but never evaluated, and the check expands to nothing.  */
#define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR)))
#define GIMPLE_CHECK(GS, CODE) (void)0
#endif
/* Class of GIMPLE expressions suitable for the RHS of assignments. See
get_gimple_rhs_class. */
enum gimple_rhs_class
{
GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */
GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */
GIMPLE_BINARY_RHS, /* The expression is a binary operation. */
GIMPLE_UNARY_RHS, /* The expression is a unary operation. */
GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA
name, a _DECL, a _REF, etc.). */
};
/* Specific flags for individual GIMPLE statements. These flags are
always stored in gimple_statement_base.subcode and they may only be
defined for statement codes that do not use subcodes.
Values for the masks can overlap as long as the overlapping values
are never used in the same statement class.
The maximum mask value that can be defined is 1 << 15 (i.e., each
statement code can hold up to 16 bitflags).
Keep this list sorted. */
enum gf_mask {
/* Flags for GIMPLE_ASM.  */
GF_ASM_INPUT = 1 << 0,
GF_ASM_VOLATILE = 1 << 1,
/* Flags for GIMPLE_CALL.  */
GF_CALL_FROM_THUNK = 1 << 0,
GF_CALL_RETURN_SLOT_OPT = 1 << 1,
GF_CALL_TAILCALL = 1 << 2,
GF_CALL_VA_ARG_PACK = 1 << 3,
GF_CALL_NOTHROW = 1 << 4,
GF_CALL_ALLOCA_FOR_VAR = 1 << 5,
GF_CALL_INTERNAL = 1 << 6,
GF_CALL_CTRL_ALTERING = 1 << 7,
/* Flags for GIMPLE_OMP_PARALLEL.  */
GF_OMP_PARALLEL_COMBINED = 1 << 0,
/* Flags for GIMPLE_OMP_FOR; the low two bits encode the loop kind.  */
GF_OMP_FOR_KIND_MASK = 3 << 0,
GF_OMP_FOR_KIND_FOR = 0 << 0,
GF_OMP_FOR_KIND_DISTRIBUTE = 1 << 0,
GF_OMP_FOR_KIND_SIMD = 2 << 0,
GF_OMP_FOR_KIND_CILKSIMD = 3 << 0,
GF_OMP_FOR_COMBINED = 1 << 2,
GF_OMP_FOR_COMBINED_INTO = 1 << 3,
/* Flags for GIMPLE_OMP_TARGET; the low two bits encode the target kind.  */
GF_OMP_TARGET_KIND_MASK = 3 << 0,
GF_OMP_TARGET_KIND_REGION = 0 << 0,
GF_OMP_TARGET_KIND_DATA = 1 << 0,
GF_OMP_TARGET_KIND_UPDATE = 2 << 0,
/* True on an GIMPLE_OMP_RETURN statement if the return does not require
a thread synchronization via some sort of barrier. The exact barrier
that would otherwise be emitted is dependent on the OMP statement with
which this return is associated. */
GF_OMP_RETURN_NOWAIT = 1 << 0,
/* Flags for GIMPLE_OMP_SECTION / GIMPLE_OMP_ATOMIC_*.  */
GF_OMP_SECTION_LAST = 1 << 0,
GF_OMP_ATOMIC_NEED_VALUE = 1 << 0,
GF_OMP_ATOMIC_SEQ_CST = 1 << 1,
/* Flag for GIMPLE_PREDICT.  */
GF_PREDICT_TAKEN = 1 << 15
};
/* Currently, there are only two types of gimple debug stmt. Others are
envisioned, for example, to enable the generation of is_stmt notes
in line number information, to mark sequence points, etc. This
subcode is to be used to tell them apart.  Stored in the shared
gimple_statement_base.subcode field.  */
enum gimple_debug_subcode {
GIMPLE_DEBUG_BIND = 0,
GIMPLE_DEBUG_SOURCE_BIND = 1
};
/* Masks for selecting a pass local flag (PLF) to work on. These
masks are used by gimple_set_plf and gimple_plf.  They index into the
2-bit "plf" field of gimple_statement_base.  */
enum plf_mask {
GF_PLF_1 = 1 << 0,
GF_PLF_2 = 1 << 1
};
/* Data structure definitions for GIMPLE tuples. NOTE: word markers
are for 64 bit hosts. */
struct GTY((desc ("gimple_statement_structure (&%h)"), tag ("GSS_BASE"),
chain_next ("%h.next"), variable_size))
gimple_statement_base
{
/* [ WORD 1 ]
Main identifying code for a tuple. */
ENUM_BITFIELD(gimple_code) code : 8;
/* Nonzero if a warning should not be emitted on this tuple. */
unsigned int no_warning : 1;
/* Nonzero if this tuple has been visited. Passes are responsible
for clearing this bit before using it. */
unsigned int visited : 1;
/* Nonzero if this tuple represents a non-temporal move. */
unsigned int nontemporal_move : 1;
/* Pass local flags. These flags are free for any pass to use as
they see fit. Passes should not assume that these flags contain
any useful value when the pass starts. Any initial state that
the pass requires should be set on entry to the pass. See
gimple_set_plf and gimple_plf for usage. */
unsigned int plf : 2;
/* Nonzero if this statement has been modified and needs to have its
operands rescanned. */
unsigned modified : 1;
/* Nonzero if this statement contains volatile operands. */
unsigned has_volatile_ops : 1;
/* Padding to get subcode to 16 bit alignment. */
unsigned pad : 1;
/* The SUBCODE field can be used for tuple-specific flags for tuples
that do not require subcodes. Note that SUBCODE should be at
least as wide as tree codes, as several tuples store tree codes
in there.  See enum gf_mask for the flag values.  */
unsigned int subcode : 16;
/* UID of this statement. This is used by passes that want to
assign IDs to statements. It must be assigned and used by each
pass. By default it should be assumed to contain garbage. */
unsigned uid;
/* [ WORD 2 ]
Locus information for debug info.  NOTE(review): the lexical BLOCK
appears to be encoded inside the location in this GCC version — confirm
against LOCATION_BLOCK before relying on it.  */
location_t location;
/* Number of operands in this tuple. */
unsigned num_ops;
/* [ WORD 3 ]
Basic block holding this statement. */
basic_block bb;
/* [ WORD 4-5 ]
Linked lists of gimple statements. The next pointers form
a NULL terminated list, the prev pointers are a cyclic list.
A gimple statement is hence also a double-ended list of
statements, with the pointer itself being the first element,
and the prev pointer being the last. */
gimple next;
gimple GTY((skip)) prev;
};
/* Base structure for tuples with operands. */
/* This gimple subclass has no tag value. */
struct GTY(())
gimple_statement_with_ops_base : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
SSA operand vectors. NOTE: It should be possible to
amalgamate these vectors with the operand vector OP. However,
the SSA operand vectors are organized differently and contain
more information (like immediate use chaining).
Marked GTY skip: this chain is not traversed by the garbage
collector.  */
struct use_optype_d GTY((skip (""))) *use_ops;
};
/* Statements that take register operands. */
struct GTY((tag("GSS_WITH_OPS")))
gimple_statement_with_ops : public gimple_statement_with_ops_base
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one.
Trailing flexible array, sized to num_ops at allocation time
(hence the GTY length annotation).  */
tree GTY((length ("%h.num_ops"))) op[1];
};
/* Base for statements that take both memory and register operands. */
struct GTY((tag("GSS_WITH_MEM_OPS_BASE")))
gimple_statement_with_memory_ops_base : public gimple_statement_with_ops_base
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8-9 ]
Virtual operands for this statement. The GC will pick them
up via the ssa_names array, which is why both fields are
GTY-skipped here. */
tree GTY((skip (""))) vdef;
tree GTY((skip (""))) vuse;
};
/* Statements that take both memory and register operands. */
struct GTY((tag("GSS_WITH_MEM_OPS")))
gimple_statement_with_memory_ops :
public gimple_statement_with_memory_ops_base
{
/* [ WORD 1-9 ] : base class */
/* [ WORD 10 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one.
Trailing flexible array of num_ops operands.  */
tree GTY((length ("%h.num_ops"))) op[1];
};
/* Call statements that take both memory and register operands. */
struct GTY((tag("GSS_CALL")))
gimple_statement_call : public gimple_statement_with_memory_ops_base
{
/* [ WORD 1-9 ] : base class */
/* [ WORD 10-13 ]
Points-to solutions for the call's used and clobbered memory.  */
struct pt_solution call_used;
struct pt_solution call_clobbered;
/* [ WORD 14 ]
For a regular call this is the callee's function type; for an
internal call (GF_CALL_INTERNAL set in subcode) it is the
internal function code.  The GTY desc discriminates on that flag.  */
union GTY ((desc ("%1.subcode & GF_CALL_INTERNAL"))) {
tree GTY ((tag ("0"))) fntype;
enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn;
} u;
/* [ WORD 15 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.num_ops"))) op[1];
};
/* OpenMP statements (#pragma omp).  Base class for all OMP tuples
that carry a statement sequence body. */
struct GTY((tag("GSS_OMP")))
gimple_statement_omp : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
gimple_seq body;
};
/* GIMPLE_BIND */
struct GTY((tag("GSS_BIND")))
gimple_statement_bind : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Variables declared in this scope. */
tree vars;
/* [ WORD 8 ]
This is different than the BLOCK field in gimple_statement_base,
which is analogous to TREE_BLOCK (i.e., the lexical block holding
this statement). This field is the equivalent of BIND_EXPR_BLOCK
in tree land (i.e., the lexical scope defined by this bind). See
gimple-low.c. */
tree block;
/* [ WORD 9 ] */
gimple_seq body;
};
/* GIMPLE_CATCH */
struct GTY((tag("GSS_CATCH")))
gimple_statement_catch : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Types caught by this handler. */
tree types;
/* [ WORD 8 ]
Handler body. */
gimple_seq handler;
};
/* GIMPLE_EH_FILTER */
struct GTY((tag("GSS_EH_FILTER")))
gimple_statement_eh_filter : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Filter types. */
tree types;
/* [ WORD 8 ]
Failure actions. */
gimple_seq failure;
};
/* GIMPLE_EH_ELSE */
struct GTY((tag("GSS_EH_ELSE")))
gimple_statement_eh_else : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7,8 ]
Normal-path and exception-path bodies. */
gimple_seq n_body, e_body;
};
/* GIMPLE_EH_MUST_NOT_THROW */
struct GTY((tag("GSS_EH_MNT")))
gimple_statement_eh_mnt : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] Abort function decl. */
tree fndecl;
};
/* GIMPLE_PHI */
struct GTY((tag("GSS_PHI")))
gimple_statement_phi : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
CAPACITY is the allocated size of the args array; NARGS is the
number of slots currently in use. */
unsigned capacity;
unsigned nargs;
/* [ WORD 8 ] */
tree result;
/* [ WORD 9 ]
Trailing array of arguments; actual length is nargs (see GTY
length annotation). */
struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};
/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
struct GTY((tag("GSS_EH_CTRL")))
gimple_statement_eh_ctrl : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Exception region number. */
int region;
};
struct GTY((tag("GSS_EH_CTRL")))
gimple_statement_resx : public gimple_statement_eh_ctrl
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_RESX. */
};
struct GTY((tag("GSS_EH_CTRL")))
gimple_statement_eh_dispatch : public gimple_statement_eh_ctrl
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_EH_DISPATCH. */
};
/* GIMPLE_TRY */
struct GTY((tag("GSS_TRY")))
gimple_statement_try : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Expression to evaluate. */
gimple_seq eval;
/* [ WORD 8 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* Kind of GIMPLE_TRY statements.  Stored in the statement subcode;
GIMPLE_TRY_KIND masks out the catch/finally discriminator. */
enum gimple_try_flags
{
/* A try/catch. */
GIMPLE_TRY_CATCH = 1 << 0,
/* A try/finally. */
GIMPLE_TRY_FINALLY = 1 << 1,
GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,
/* Analogous to TRY_CATCH_IS_CLEANUP. */
GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
};
/* GIMPLE_WITH_CLEANUP_EXPR */
struct GTY((tag("GSS_WCE")))
gimple_statement_wce : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
executed if an exception is thrown, not on normal exit of its
scope. This flag is analogous to the CLEANUP_EH_ONLY flag
in TARGET_EXPRs. */
/* [ WORD 7 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* GIMPLE_ASM */
struct GTY((tag("GSS_ASM")))
gimple_statement_asm : public gimple_statement_with_memory_ops_base
{
/* [ WORD 1-9 ] : base class */
/* [ WORD 10 ]
__asm__ statement. */
const char *string;
/* [ WORD 11 ]
Number of inputs, outputs, clobbers, labels.
Packed as four bytes; each count is thus limited to 255. */
unsigned char ni;
unsigned char no;
unsigned char nc;
unsigned char nl;
/* [ WORD 12 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.num_ops"))) op[1];
};
/* GIMPLE_OMP_CRITICAL */
struct GTY((tag("GSS_OMP_CRITICAL")))
gimple_statement_omp_critical : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Critical section name. */
tree name;
};
/* One per collapsed loop dimension of a GIMPLE_OMP_FOR. */
struct GTY(()) gimple_omp_for_iter {
/* Condition code. */
enum tree_code cond;
/* Index variable. */
tree index;
/* Initial value. */
tree initial;
/* Final value. */
tree final;
/* Increment. */
tree incr;
};
/* GIMPLE_OMP_FOR */
struct GTY((tag("GSS_OMP_FOR")))
gimple_statement_omp_for : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ] */
tree clauses;
/* [ WORD 9 ]
Number of elements in iter array. */
size_t collapse;
/* [ WORD 10 ] */
struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
/* [ WORD 11 ]
Pre-body evaluated before the loop body begins. */
gimple_seq pre_body;
};
/* GIMPLE_OMP_PARALLEL, GIMPLE_OMP_TARGET */
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
gimple_statement_omp_parallel_layout : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Clauses. */
tree clauses;
/* [ WORD 9 ]
Child function holding the body of the parallel region. */
tree child_fn;
/* [ WORD 10 ]
Shared data argument. */
tree data_arg;
};
/* GIMPLE_OMP_PARALLEL or GIMPLE_TASK */
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
gimple_statement_omp_taskreg : public gimple_statement_omp_parallel_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_PARALLEL
|| stmt->code == GIMPLE_OMP_TASK. */
};
/* GIMPLE_OMP_PARALLEL */
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
gimple_statement_omp_parallel : public gimple_statement_omp_taskreg
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_PARALLEL. */
};
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
gimple_statement_omp_target : public gimple_statement_omp_parallel_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_TARGET. */
};
/* GIMPLE_OMP_TASK */
struct GTY((tag("GSS_OMP_TASK")))
gimple_statement_omp_task : public gimple_statement_omp_taskreg
{
/* [ WORD 1-10 ] : base class */
/* [ WORD 11 ]
Child function holding firstprivate initialization if needed. */
tree copy_fn;
/* [ WORD 12-13 ]
Size and alignment in bytes of the argument data block. */
tree arg_size;
tree arg_align;
};
/* GIMPLE_OMP_SECTION */
/* Uses struct gimple_statement_omp. */
/* GIMPLE_OMP_SECTIONS */
struct GTY((tag("GSS_OMP_SECTIONS")))
gimple_statement_omp_sections : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ] */
tree clauses;
/* [ WORD 9 ]
The control variable used for deciding which of the sections to
execute. */
tree control;
};
/* GIMPLE_OMP_CONTINUE.
Note: This does not inherit from gimple_statement_omp, because we
do not need the body field. */
struct GTY((tag("GSS_OMP_CONTINUE")))
gimple_statement_omp_continue : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
tree control_def;
/* [ WORD 8 ] */
tree control_use;
};
/* GIMPLE_OMP_SINGLE, GIMPLE_OMP_TEAMS */
struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
gimple_statement_omp_single_layout : public gimple_statement_omp
{
/* [ WORD 1-7 ] : base class */
/* [ WORD 8 ] */
tree clauses;
};
struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
gimple_statement_omp_single : public gimple_statement_omp_single_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_SINGLE. */
};
struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
gimple_statement_omp_teams : public gimple_statement_omp_single_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_TEAMS. */
};
/* GIMPLE_OMP_ATOMIC_LOAD.
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
struct GTY((tag("GSS_OMP_ATOMIC_LOAD")))
gimple_statement_omp_atomic_load : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7-8 ] */
tree rhs, lhs;
};
/* GIMPLE_OMP_ATOMIC_STORE.
See note on GIMPLE_OMP_ATOMIC_LOAD. */
struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
gimple_statement_omp_atomic_store_layout : public gimple_statement_base
{
/* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
tree val;
};
struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
gimple_statement_omp_atomic_store :
public gimple_statement_omp_atomic_store_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_ATOMIC_STORE. */
};
struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
gimple_statement_omp_return :
public gimple_statement_omp_atomic_store_layout
{
/* No extra fields; adds invariant:
stmt->code == GIMPLE_OMP_RETURN. */
};
/* GIMPLE_TRANSACTION. */
/* Bits to be stored in the GIMPLE_TRANSACTION subcode. */
/* The __transaction_atomic was declared [[outer]] or it is
__transaction_relaxed. */
#define GTMA_IS_OUTER (1u << 0)
#define GTMA_IS_RELAXED (1u << 1)
#define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED)
/* The transaction is seen to not have an abort. */
#define GTMA_HAVE_ABORT (1u << 2)
/* The transaction is seen to have loads or stores. */
#define GTMA_HAVE_LOAD (1u << 3)
#define GTMA_HAVE_STORE (1u << 4)
/* The transaction MAY enter serial irrevocable mode in its dynamic scope. */
#define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5)
/* The transaction WILL enter serial irrevocable mode.
An irrevocable block post-dominates the entire transaction, such
that all invocations of the transaction will go serial-irrevocable.
In such case, we don't bother instrumenting the transaction, and
tell the runtime that it should begin the transaction in
serial-irrevocable mode. */
#define GTMA_DOES_GO_IRREVOCABLE (1u << 6)
/* The transaction contains no instrumentation code whatsover, most
likely because it is guaranteed to go irrevocable upon entry. */
#define GTMA_HAS_NO_INSTRUMENTATION (1u << 7)
struct GTY((tag("GSS_TRANSACTION")))
gimple_statement_transaction : public gimple_statement_with_memory_ops_base
{
/* [ WORD 1-9 ] : base class */
/* [ WORD 10 ] */
gimple_seq body;
/* [ WORD 11 ] */
tree label;
};
/* Enumerate the tuple structure kinds from gsstruct.def; DEFGSSTRUCT
expands each entry to its SYM enumerator. */
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
LAST_GSS_ENUM
};
#undef DEFGSSTRUCT
/* is_a_helper specializations (non-const).  These enable
is_a <T> (gs), as_a <T> (gs) and dyn_cast <T> (gs) on gimple
statements by testing the statement code against the code(s) the
subclass invariant requires. */
template <>
template <>
inline bool
is_a_helper <gimple_statement_asm>::test (gimple gs)
{
return gs->code == GIMPLE_ASM;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_bind>::test (gimple gs)
{
return gs->code == GIMPLE_BIND;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_call>::test (gimple gs)
{
return gs->code == GIMPLE_CALL;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_catch>::test (gimple gs)
{
return gs->code == GIMPLE_CATCH;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_resx>::test (gimple gs)
{
return gs->code == GIMPLE_RESX;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_eh_dispatch>::test (gimple gs)
{
return gs->code == GIMPLE_EH_DISPATCH;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_eh_else>::test (gimple gs)
{
return gs->code == GIMPLE_EH_ELSE;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_eh_filter>::test (gimple gs)
{
return gs->code == GIMPLE_EH_FILTER;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_eh_mnt>::test (gimple gs)
{
return gs->code == GIMPLE_EH_MUST_NOT_THROW;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_atomic_load>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_ATOMIC_LOAD;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_atomic_store>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_ATOMIC_STORE;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_return>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_RETURN;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_continue>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_CONTINUE;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_critical>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_CRITICAL;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_for>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_FOR;
}
/* taskreg covers both codes sharing the parallel layout. */
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_taskreg>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_PARALLEL || gs->code == GIMPLE_OMP_TASK;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_parallel>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_PARALLEL;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_target>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_TARGET;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_sections>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_SECTIONS;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_single>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_SINGLE;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_teams>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_TEAMS;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_omp_task>::test (gimple gs)
{
return gs->code == GIMPLE_OMP_TASK;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_phi>::test (gimple gs)
{
return gs->code == GIMPLE_PHI;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_transaction>::test (gimple gs)
{
return gs->code == GIMPLE_TRANSACTION;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_try>::test (gimple gs)
{
return gs->code == GIMPLE_TRY;
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_wce>::test (gimple gs)
{
return gs->code == GIMPLE_WITH_CLEANUP_EXPR;
}
/* const counterparts of the is_a_helper specializations above, for
casting const_gimple values.  Note: not every non-const helper has a
const twin here (e.g. eh_else, eh_mnt, try, wce are absent). */
template <>
template <>
inline bool
is_a_helper <const gimple_statement_asm>::test (const_gimple gs)
{
return gs->code == GIMPLE_ASM;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_bind>::test (const_gimple gs)
{
return gs->code == GIMPLE_BIND;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_call>::test (const_gimple gs)
{
return gs->code == GIMPLE_CALL;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_catch>::test (const_gimple gs)
{
return gs->code == GIMPLE_CATCH;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_resx>::test (const_gimple gs)
{
return gs->code == GIMPLE_RESX;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_eh_dispatch>::test (const_gimple gs)
{
return gs->code == GIMPLE_EH_DISPATCH;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_eh_filter>::test (const_gimple gs)
{
return gs->code == GIMPLE_EH_FILTER;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_atomic_load>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_ATOMIC_LOAD;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_atomic_store>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_ATOMIC_STORE;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_return>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_RETURN;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_continue>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_CONTINUE;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_critical>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_CRITICAL;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_for>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_FOR;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_taskreg>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_PARALLEL || gs->code == GIMPLE_OMP_TASK;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_parallel>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_PARALLEL;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_target>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_TARGET;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_sections>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_SECTIONS;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_single>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_SINGLE;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_teams>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_TEAMS;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_omp_task>::test (const_gimple gs)
{
return gs->code == GIMPLE_OMP_TASK;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_phi>::test (const_gimple gs)
{
return gs->code == GIMPLE_PHI;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_transaction>::test (const_gimple gs)
{
return gs->code == GIMPLE_TRANSACTION;
}
/* Offset in bytes to the location of the operand vector.
Zero if there is no operand vector for this tuple structure. */
extern size_t const gimple_ops_offset_[];
/* Map GIMPLE codes to GSS codes. */
extern enum gimple_statement_structure_enum const gss_for_code_[];
/* This variable holds the currently expanded gimple statement for purposes
of comminucating the profile info to the builtin expanders. */
extern gimple currently_expanding_gimple_stmt;
/* Statement allocation and builders, one per statement kind. */
#define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO)
gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL);
gimple gimple_build_return (tree);
void gimple_call_reset_alias_info (gimple);
gimple gimple_build_call_vec (tree, vec<tree> );
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_valist (tree, unsigned, va_list);
gimple gimple_build_call_internal (enum internal_fn, unsigned, ...);
gimple gimple_build_call_internal_vec (enum internal_fn, vec<tree> );
gimple gimple_build_call_from_tree (tree);
gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)
gimple gimple_build_assign_with_ops (enum tree_code, tree,
tree, tree, tree CXX_MEM_STAT_INFO);
gimple gimple_build_assign_with_ops (enum tree_code, tree,
tree, tree CXX_MEM_STAT_INFO);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_cond_from_tree (tree, tree, tree);
void gimple_cond_set_condition_from_tree (gimple, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, vec<tree, va_gc> *,
vec<tree, va_gc> *, vec<tree, va_gc> *,
vec<tree, va_gc> *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_eh_else (gimple_seq, gimple_seq);
gimple_statement_try *gimple_build_try (gimple_seq, gimple_seq,
enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
gimple gimple_build_switch (tree, tree, vec<tree> );
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt) \
gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_debug_source_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_source_bind(var,val,stmt) \
gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
/* Builders for the OpenMP statement kinds. */
gimple gimple_build_omp_critical (gimple_seq, tree);
gimple gimple_build_omp_for (gimple_seq, int, tree, size_t, gimple_seq);
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_section (gimple_seq);
gimple gimple_build_omp_master (gimple_seq);
gimple gimple_build_omp_taskgroup (gimple_seq);
gimple gimple_build_omp_continue (tree, tree);
gimple gimple_build_omp_ordered (gimple_seq);
gimple gimple_build_omp_return (bool);
gimple gimple_build_omp_sections (gimple_seq, tree);
gimple gimple_build_omp_sections_switch (void);
gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_omp_target (gimple_seq, int, tree);
gimple gimple_build_omp_teams (gimple_seq, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
gimple gimple_build_transaction (gimple_seq, tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
/* Sequence manipulation and statement queries. */
extern void gimple_seq_add_stmt (gimple_seq *, gimple);
extern void gimple_seq_add_stmt_without_update (gimple_seq *, gimple);
void gimple_seq_add_seq (gimple_seq *, gimple_seq);
extern void annotate_all_with_location_after (gimple_seq, gimple_stmt_iterator,
location_t);
extern void annotate_all_with_location (gimple_seq, location_t);
bool empty_body_p (gimple_seq);
gimple_seq gimple_seq_copy (gimple_seq);
bool gimple_call_same_target_p (const_gimple, const_gimple);
int gimple_call_flags (const_gimple);
int gimple_call_arg_flags (const_gimple, unsigned);
int gimple_call_return_flags (const_gimple);
bool gimple_assign_copy_p (gimple);
bool gimple_assign_ssa_name_copy_p (gimple);
bool gimple_assign_unary_nop_p (gimple);
void gimple_set_bb (gimple, basic_block);
void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code,
tree, tree, tree);
tree gimple_get_lhs (const_gimple);
void gimple_set_lhs (gimple, tree);
gimple gimple_copy (gimple);
bool gimple_has_side_effects (const_gimple);
bool gimple_could_trap_p_1 (gimple, bool, bool);
bool gimple_could_trap_p (gimple);
bool gimple_assign_rhs_could_trap_p (gimple);
extern void dump_gimple_statistics (void);
unsigned get_gimple_rhs_num_ops (enum tree_code);
extern tree canonicalize_cond_expr_cond (tree);
gimple gimple_call_copy_skip_args (gimple, bitmap);
extern bool gimple_compare_field_offset (tree, tree);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
extern alias_set_type gimple_get_alias_set (tree);
extern bool gimple_ior_addresses_taken (bitmap, gimple);
extern bool gimple_builtin_call_types_compatible_p (gimple, tree);
extern bool gimple_call_builtin_p (gimple);
extern bool gimple_call_builtin_p (gimple, enum built_in_class);
extern bool gimple_call_builtin_p (gimple, enum built_in_function);
extern bool gimple_asm_clobbers_memory_p (const_gimple);
extern void dump_decl_set (FILE *, bitmap);
extern bool nonfreeing_call_p (gimple);
extern bool infer_nonnull_range (gimple, tree, bool, bool);
extern void sort_case_labels (vec<tree> );
extern void preprocess_case_label_vec_for_gimple (vec<tree> , tree, tree *);
extern void gimple_seq_set_location (gimple_seq , location_t);
/* Formal (expression) temporary table handling: multiple occurrences of
the same scalar expression are evaluated into the same temporary. */
typedef struct gimple_temp_hash_elt
{
tree val; /* Key */
tree temp; /* Value */
} elt_t;
/* Get the number of the next statement uid to be allocated. */
static inline unsigned int
gimple_stmt_max_uid (struct function *fn)
{
return fn->last_stmt_uid;
}
/* Set the number of the next statement uid to be allocated. */
static inline void
set_gimple_stmt_max_uid (struct function *fn, unsigned int maxid)
{
fn->last_stmt_uid = maxid;
}
/* Return the next statement uid to be allocated and bump the counter
(post-increment): the caller receives the old value. */
static inline unsigned int
inc_gimple_stmt_max_uid (struct function *fn)
{
return fn->last_stmt_uid++;
}
/* A gimple_seq is represented by its first statement; the last statement
is reachable through the first statement's PREV link (see
gimple_init_singleton below, which makes PREV circular). */
/* Return the first node in GIMPLE sequence S. */
static inline gimple_seq_node
gimple_seq_first (gimple_seq s)
{
return s;
}
/* Return the first statement in GIMPLE sequence S. */
static inline gimple
gimple_seq_first_stmt (gimple_seq s)
{
gimple_seq_node n = gimple_seq_first (s);
return n;
}
/* Return the last node in GIMPLE sequence S, or NULL if S is empty. */
static inline gimple_seq_node
gimple_seq_last (gimple_seq s)
{
return s ? s->prev : NULL;
}
/* Return the last statement in GIMPLE sequence S. */
static inline gimple
gimple_seq_last_stmt (gimple_seq s)
{
gimple_seq_node n = gimple_seq_last (s);
return n;
}
/* Set the last node in GIMPLE sequence *PS to LAST.
Precondition: *PS is non-empty (its first node is dereferenced). */
static inline void
gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last)
{
(*ps)->prev = last;
}
/* Set the first node in GIMPLE sequence *PS to FIRST. */
static inline void
gimple_seq_set_first (gimple_seq *ps, gimple_seq_node first)
{
*ps = first;
}
/* Return true if GIMPLE sequence S is empty. */
static inline bool
gimple_seq_empty_p (gimple_seq s)
{
return s == NULL;
}
/* Allocate a new sequence and initialize its first element with STMT. */
static inline gimple_seq
gimple_seq_alloc_with_stmt (gimple stmt)
{
gimple_seq seq = NULL;
gimple_seq_add_stmt (&seq, stmt);
return seq;
}
/* Returns the sequence of statements in BB, or NULL once the block has
been converted to RTL. */
static inline gimple_seq
bb_seq (const_basic_block bb)
{
return (!(bb->flags & BB_RTL)) ? bb->il.gimple.seq : NULL;
}
/* Return a pointer to BB's statement sequence, or NULL if BB is in RTL
form. */
static inline gimple_seq *
bb_seq_addr (basic_block bb)
{
return (!(bb->flags & BB_RTL)) ? &bb->il.gimple.seq : NULL;
}
/* Sets the sequence of statements in BB to SEQ.  BB must still be in
GIMPLE form (checked). */
static inline void
set_bb_seq (basic_block bb, gimple_seq seq)
{
gcc_checking_assert (!(bb->flags & BB_RTL));
bb->il.gimple.seq = seq;
}
/* Return the code for GIMPLE statement G. */
static inline enum gimple_code
gimple_code (const_gimple g)
{
return g->code;
}
/* Return the GSS code used by a GIMPLE code (table lookup in
gss_for_code_, bounds-checked in checking builds). */
static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
return gss_for_code_[code];
}
/* Return which GSS code is used by GS. */
static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
return gss_for_code (gimple_code (gs));
}
/* Return true if statement G has sub-statements.  This is only true for
High GIMPLE statements. */
static inline bool
gimple_has_substatements (gimple g)
{
  /* Expressed as an explicit membership test; the set of codes is
     exactly the one accepted by the original switch. */
  const enum gimple_code c = gimple_code (g);
  return (c == GIMPLE_BIND
	  || c == GIMPLE_CATCH
	  || c == GIMPLE_EH_FILTER
	  || c == GIMPLE_EH_ELSE
	  || c == GIMPLE_TRY
	  || c == GIMPLE_OMP_FOR
	  || c == GIMPLE_OMP_MASTER
	  || c == GIMPLE_OMP_TASKGROUP
	  || c == GIMPLE_OMP_ORDERED
	  || c == GIMPLE_OMP_SECTION
	  || c == GIMPLE_OMP_PARALLEL
	  || c == GIMPLE_OMP_TASK
	  || c == GIMPLE_OMP_SECTIONS
	  || c == GIMPLE_OMP_SINGLE
	  || c == GIMPLE_OMP_TARGET
	  || c == GIMPLE_OMP_TEAMS
	  || c == GIMPLE_OMP_CRITICAL
	  || c == GIMPLE_WITH_CLEANUP_EXPR
	  || c == GIMPLE_TRANSACTION);
}
/* Return the basic block holding statement G. */
static inline basic_block
gimple_bb (const_gimple g)
{
return g->bb;
}
/* Return the lexical scope block holding statement G.  The block is
packed inside the statement's location word. */
static inline tree
gimple_block (const_gimple g)
{
return LOCATION_BLOCK (g->location);
}
/* Set BLOCK to be the lexical scope block holding statement G.
A NULL BLOCK strips the block from the location, keeping only the
locus part. */
static inline void
gimple_set_block (gimple g, tree block)
{
if (block)
g->location =
COMBINE_LOCATION_DATA (line_table, g->location, block);
else
g->location = LOCATION_LOCUS (g->location);
}
/* Return location information for statement G. */
static inline location_t
gimple_location (const_gimple g)
{
return g->location;
}
/* Return pointer to location information for statement G. */
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
return &g->location;
}
/* Set location information for statement G. */
static inline void
gimple_set_location (gimple g, location_t location)
{
g->location = location;
}
/* Return true if G contains location information. */
static inline bool
gimple_has_location (const_gimple g)
{
return LOCATION_LOCUS (gimple_location (g)) != UNKNOWN_LOCATION;
}
/* Return the file name of the location of STMT. */
static inline const char *
gimple_filename (const_gimple stmt)
{
return LOCATION_FILE (gimple_location (stmt));
}
/* Return the line number of the location of STMT. */
static inline int
gimple_lineno (const_gimple stmt)
{
return LOCATION_LINE (gimple_location (stmt));
}
/* Determine whether SEQ is a singleton (non-empty and its first node is
also its last). */
static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
return ((gimple_seq_first (seq) != NULL)
&& (gimple_seq_first (seq) == gimple_seq_last (seq)));
}
/* Return true if no warnings should be emitted for statement STMT. */
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
return stmt->no_warning;
}
/* Set the no_warning flag of STMT to NO_WARNING. */
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
stmt->no_warning = (unsigned) no_warning;
}
/* Set the visited status on statement STMT to VISITED_P.
The visited bit is a scratch flag for pass-local traversal state. */
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
stmt->visited = (unsigned) visited_p;
}
/* Return the visited status for statement STMT. */
static inline bool
gimple_visited_p (gimple stmt)
{
return stmt->visited;
}
/* Set pass local flag PLF on statement STMT to VAL_P: the bit(s) in
PLF are set when VAL_P is true and cleared otherwise. */
static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
  const unsigned int mask = (unsigned int) plf;
  if (val_p)
    stmt->plf |= mask;
  else
    stmt->plf &= ~mask;
}
/* Return the value of pass local flag PLF on statement STMT
(non-zero when any bit of PLF is set). */
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
return stmt->plf & ((unsigned int) plf);
}
/* Set the UID of statement. */
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
g->uid = uid;
}
/* Return the UID of statement. */
static inline unsigned
gimple_uid (const_gimple g)
{
return g->uid;
}
/* Make statement G a singleton sequence: no successor, and PREV points
back at G itself so the sequence's last node is reachable from its
first (see gimple_seq_last). */
static inline void
gimple_init_singleton (gimple g)
{
g->next = NULL;
g->prev = g;
}
/* Return true if GIMPLE statement G has register or memory operands.
Relies on the code enum laying the operand-bearing codes out as a
contiguous range [GIMPLE_COND, GIMPLE_RETURN]. */
static inline bool
gimple_has_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_with_ops>::test (const_gimple gs)
{
return gimple_has_ops (gs);
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_with_ops>::test (gimple gs)
{
return gimple_has_ops (gs);
}
/* Return true if GIMPLE statement G has memory operands.  As above,
relies on [GIMPLE_ASSIGN, GIMPLE_RETURN] being contiguous. */
static inline bool
gimple_has_mem_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}
template <>
template <>
inline bool
is_a_helper <const gimple_statement_with_memory_ops>::test (const_gimple gs)
{
return gimple_has_mem_ops (gs);
}
template <>
template <>
inline bool
is_a_helper <gimple_statement_with_memory_ops>::test (gimple gs)
{
return gimple_has_mem_ops (gs);
}
/* Return the set of USE operands for statement G, or NULL if G's kind
has no operands (dyn_cast fails). */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
const gimple_statement_with_ops *ops_stmt =
dyn_cast <const gimple_statement_with_ops> (g);
if (!ops_stmt)
return NULL;
return ops_stmt->use_ops;
}
/* Set USE to be the set of USE operands for statement G.
Unlike the getter, this asserts G has operands (as_a). */
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
gimple_statement_with_ops *ops_stmt =
as_a <gimple_statement_with_ops> (g);
ops_stmt->use_ops = use;
}
/* Return the single VUSE operand of the statement G, or NULL_TREE if
G's kind has no memory operands. */
static inline tree
gimple_vuse (const_gimple g)
{
const gimple_statement_with_memory_ops *mem_ops_stmt =
dyn_cast <const gimple_statement_with_memory_ops> (g);
if (!mem_ops_stmt)
return NULL_TREE;
return mem_ops_stmt->vuse;
}
/* Return the single VDEF operand of the statement G, or NULL_TREE if
G's kind has no memory operands. */
static inline tree
gimple_vdef (const_gimple g)
{
const gimple_statement_with_memory_ops *mem_ops_stmt =
dyn_cast <const gimple_statement_with_memory_ops> (g);
if (!mem_ops_stmt)
return NULL_TREE;
return mem_ops_stmt->vdef;
}
/* Return a pointer to the single VUSE operand of the statement G, or
NULL if G's kind has no memory operands. */
static inline tree *
gimple_vuse_ptr (gimple g)
{
gimple_statement_with_memory_ops *mem_ops_stmt =
dyn_cast <gimple_statement_with_memory_ops> (g);
if (!mem_ops_stmt)
return NULL;
return &mem_ops_stmt->vuse;
}
/* Return a pointer to the single VDEF operand of the statement G, or
NULL if G's kind has no memory operands. */
static inline tree *
gimple_vdef_ptr (gimple g)
{
gimple_statement_with_memory_ops *mem_ops_stmt =
dyn_cast <gimple_statement_with_memory_ops> (g);
if (!mem_ops_stmt)
return NULL;
return &mem_ops_stmt->vdef;
}
/* Set the single VUSE operand of the statement G. */
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
gimple_statement_with_memory_ops *mem_ops_stmt =
as_a <gimple_statement_with_memory_ops> (g);
mem_ops_stmt->vuse = vuse;
}
/* Set the single VDEF operand of the statement G. */
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
gimple_statement_with_memory_ops *mem_ops_stmt =
as_a <gimple_statement_with_memory_ops> (g);
mem_ops_stmt->vdef = vdef;
}
/* Return true if statement G has operands and the modified field has
been set. */
static inline bool
gimple_modified_p (const_gimple g)
{
return (gimple_has_ops (g)) ? (bool) g->modified : false;
}
/* Set the MODIFIED flag to MODIFIEDP, iff the gimple statement G has
a MODIFIED field. */
static inline void
gimple_set_modified (gimple s, bool modifiedp)
{
if (gimple_has_ops (s))
s->modified = (unsigned) modifiedp;
}
/* Return the tree code for the expression computed by STMT.  This is
   only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN.  For
   GIMPLE_CALL, return CALL_EXPR as the expression code for
   consistency.  This is useful when the caller needs to deal with the
   three kinds of computation that GIMPLE supports.  */
static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
    case GIMPLE_COND:
      /* For assignments and conditionals the subcode is the tree code.  */
      return (enum tree_code) stmt->subcode;
    default:
      /* The only remaining valid statement kind is a call.  */
      gcc_gimple_checking_assert (gimple_code (stmt) == GIMPLE_CALL);
      return CALL_EXPR;
    }
}
/* Return true if statement STMT contains volatile operands.  Always
   false for statements without memory operands.  */
static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
  if (gimple_has_mem_ops (stmt))
    return stmt->has_volatile_ops;
  else
    return false;
}
/* Set the HAS_VOLATILE_OPS flag to VOLATILEP.  Silently a no-op for
   statements without memory operands.  */
static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
  if (gimple_has_mem_ops (stmt))
    stmt->has_volatile_ops = (unsigned) volatilep;
}
/* Return true if STMT is in a transaction.  */
static inline bool
gimple_in_transaction (gimple stmt)
{
  return bb_in_transaction (gimple_bb (stmt));
}
/* Return true if statement STMT may access memory, i.e. it has memory
   operands and a virtual use.  */
static inline bool
gimple_references_memory_p (gimple stmt)
{
  return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}
/* Return the subcode for OMP statement S.  All OMP statement codes
   lie in the contiguous range checked below.  */
static inline unsigned
gimple_omp_subcode (const_gimple s)
{
  gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
	      && gimple_code (s) <= GIMPLE_OMP_TEAMS)
  return s->subcode;
}
/* Set the subcode for OMP statement S to SUBCODE.  */
static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
  /* We only have 16 bits for the subcode.  Assert that we are not
     overflowing it.  */
  gcc_gimple_checking_assert (subcode < (1 << 16));
  s->subcode = subcode;
}
/* Set the nowait flag on OMP_RETURN statement S.  */
static inline void
gimple_omp_return_set_nowait (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
  s->subcode |= GF_OMP_RETURN_NOWAIT;
}
/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
   flag set.  */
static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
  return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}
/* Set the LHS of OMP return.  */
static inline void
gimple_omp_return_set_lhs (gimple g, tree lhs)
{
  gimple_statement_omp_return *omp_return_stmt =
    as_a <gimple_statement_omp_return> (g);
  omp_return_stmt->val = lhs;
}
/* Get the LHS of OMP return.  */
static inline tree
gimple_omp_return_lhs (const_gimple g)
{
  const gimple_statement_omp_return *omp_return_stmt =
    as_a <const gimple_statement_omp_return> (g);
  return omp_return_stmt->val;
}
/* Return a pointer to the LHS of OMP return.  */
static inline tree *
gimple_omp_return_lhs_ptr (gimple g)
{
  gimple_statement_omp_return *omp_return_stmt =
    as_a <gimple_statement_omp_return> (g);
  return &omp_return_stmt->val;
}
/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
   flag set.  */
static inline bool
gimple_omp_section_last_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}
/* Set the GF_OMP_SECTION_LAST flag on G.  */
static inline void
gimple_omp_section_set_last (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  g->subcode |= GF_OMP_SECTION_LAST;
}
/* Return true if OMP parallel statement G has the
   GF_OMP_PARALLEL_COMBINED flag set.  */
static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}
/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
   value of COMBINED_P.  */
static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  if (combined_p)
    g->subcode |= GF_OMP_PARALLEL_COMBINED;
  else
    g->subcode &= ~GF_OMP_PARALLEL_COMBINED;
}
/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_NEED_VALUE flag set.  G must be either an OMP atomic
   load or an OMP atomic store.  */
static inline bool
gimple_omp_atomic_need_value_p (const_gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0;
}
/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G (an OMP atomic load or
   store).  */
static inline void
gimple_omp_atomic_set_need_value (gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->subcode |= GF_OMP_ATOMIC_NEED_VALUE;
}
/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_SEQ_CST flag set.  */
static inline bool
gimple_omp_atomic_seq_cst_p (const_gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_SEQ_CST) != 0;
}
/* Set the GF_OMP_ATOMIC_SEQ_CST flag on G (an OMP atomic load or
   store).  */
static inline void
gimple_omp_atomic_set_seq_cst (gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->subcode |= GF_OMP_ATOMIC_SEQ_CST;
}
/* Return the number of operands for statement GS.  */
static inline unsigned
gimple_num_ops (const_gimple gs)
{
  return gs->num_ops;
}
/* Set the number of operands for statement GS.  */
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
  gs->num_ops = num_ops;
}
/* Return the array of operands for statement GS.  */
static inline tree *
gimple_ops (gimple gs)
{
  size_t off;
  /* All the tuples have their operand vector at the very bottom
     of the structure.  Note that those structures that do not
     have an operand vector have a zero offset.  */
  off = gimple_ops_offset_[gimple_statement_structure (gs)];
  gcc_gimple_checking_assert (off != 0);
  return (tree *) ((char *) gs + off);
}
/* Return operand I for statement GS, or NULL_TREE if GS has no
   operand vector.  */
static inline tree
gimple_op (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
    }
  else
    return NULL_TREE;
}
/* Return a pointer to operand I for statement GS, or NULL if GS has
   no operand vector.  */
static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
    }
  else
    return NULL;
}
/* Set operand I of statement GS to OP.  */
static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
  gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
  /* Note.  It may be tempting to assert that OP matches
     is_gimple_operand, but that would be wrong.  Different tuples
     accept slightly different sets of tree operands.  Each caller
     should perform its own validation.  */
  gimple_ops (gs)[i] = op;
}
/* Return true if GS is a GIMPLE_ASSIGN.  */
static inline bool
is_gimple_assign (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_ASSIGN;
}
/* Determine if expression CODE is one of the valid expressions that can
   be used on the RHS of GIMPLE assignments.  */
static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
  return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}
/* Return the LHS of assignment statement GS.  Operand 0 is the LHS;
   operands 1-3 are the RHS operands.  */
static inline tree
gimple_assign_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of assignment statement GS.  */
static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of assignment statement GS.  */
static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 0, lhs);
  /* Keep the SSA def chain up to date when assigning to an SSA name.  */
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the first operand on the RHS of assignment statement GS.  */
static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 1);
}
/* Return a pointer to the first operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the first operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 1, rhs);
}
/* Return the second operand on the RHS of assignment statement GS.
   If GS does not have two operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  if (gimple_num_ops (gs) >= 3)
    return gimple_op (gs, 2);
  else
    return NULL_TREE;
}
/* Return a pointer to the second operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 2);
}
/* Set RHS to be the second operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 2, rhs);
}
/* Return the third operand on the RHS of assignment statement GS.
   If GS does not have three operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  if (gimple_num_ops (gs) >= 4)
    return gimple_op (gs, 3);
  else
    return NULL_TREE;
}
/* Return a pointer to the third operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 3);
}
/* Set RHS to be the third operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 3, rhs);
}
/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect
   to see only a maximum of two operands.  */
static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
				tree op1, tree op2)
{
  gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}
/* Returns true if GS is a nontemporal move.  */
static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->nontemporal_move;
}
/* Sets nontemporal move flag of GS to NONTEMPORAL.  */
static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gs->nontemporal_move = nontemporal;
}
/* Return the code of the expression computed on the rhs of assignment
   statement GS.  In case that the RHS is a single object, returns the
   tree code of the object.  */
static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  enum tree_code code = (enum tree_code) gs->subcode;
  /* While we initially set subcode to the TREE_CODE of the rhs for
     GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay
     in sync when we rewrite stmts into SSA form or do SSA propagations.  */
  return (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
	  ? TREE_CODE (gimple_assign_rhs1 (gs))
	  : code);
}
/* Set CODE to be the code for the expression computed on the RHS of
   assignment S.  */
static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
  GIMPLE_CHECK (s, GIMPLE_ASSIGN);
  s->subcode = code;
}
/* Return the gimple rhs class of the code of the expression computed on
   the rhs of assignment statement GS.
   This will never return GIMPLE_INVALID_RHS.  */
static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
  return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}
/* Return true if GS is an assignment with a singleton RHS, i.e.,
   there is no operator associated with the assignment itself.
   Unlike gimple_assign_copy_p, this predicate returns true for
   any RHS operand, including those that perform an operation
   and do not have the semantics of a copy, such as COND_EXPR.  */
static inline bool
gimple_assign_single_p (gimple gs)
{
  return (is_gimple_assign (gs)
          && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS);
}
/* Return true if GS performs a store to its lhs, i.e. the LHS exists
   and is not a register.  */
static inline bool
gimple_store_p (gimple gs)
{
  tree lhs = gimple_get_lhs (gs);
  return lhs && !is_gimple_reg (lhs);
}
/* Return true if GS is an assignment that loads from its rhs1.  */
static inline bool
gimple_assign_load_p (gimple gs)
{
  if (!gimple_assign_single_p (gs))
    return false;
  tree rhs = gimple_assign_rhs1 (gs);
  /* A WITH_SIZE_EXPR on the RHS always wraps a load.  */
  if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
    return true;
  /* Otherwise look at what the reference is ultimately based on.  */
  tree base = get_base_address (rhs);
  return (DECL_P (base)
	  || TREE_CODE (base) == MEM_REF
	  || TREE_CODE (base) == TARGET_MEM_REF);
}
/* Return true if S is a type-cast assignment.  */
static inline bool
gimple_assign_cast_p (gimple s)
{
  if (is_gimple_assign (s))
    {
      enum tree_code sc = gimple_assign_rhs_code (s);
      return CONVERT_EXPR_CODE_P (sc)
	     || sc == VIEW_CONVERT_EXPR
	     || sc == FIX_TRUNC_EXPR;
    }
  return false;
}
/* Return true if S is a clobber statement.  */
static inline bool
gimple_clobber_p (gimple s)
{
  return gimple_assign_single_p (s)
	 && TREE_CLOBBER_P (gimple_assign_rhs1 (s));
}
/* Return true if GS is a GIMPLE_CALL.  */
static inline bool
is_gimple_call (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_CALL;
}
/* Return the LHS of call statement GS.  Operand 0 of a call is the
   LHS, operand 1 the callee and operand 2 the static chain; the
   arguments start at operand 3.  */
static inline tree
gimple_call_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of call statement GS.  */
static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of call statement GS.  */
static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 0, lhs);
  /* Keep the SSA def chain up to date when assigning to an SSA name.  */
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return true if call GS calls an internal-only function, as enumerated
   by internal_fn.  */
static inline bool
gimple_call_internal_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return (gs->subcode & GF_CALL_INTERNAL) != 0;
}
/* Return the target of internal call GS.  The u.internal_fn slot is
   only meaningful for internal calls; regular calls keep u.fntype
   there (see gimple_call_fntype).  */
static inline enum internal_fn
gimple_call_internal_fn (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  return static_cast <const gimple_statement_call *> (gs)->u.internal_fn;
}
/* If CTRL_ALTERING_P is true, mark GIMPLE_CALL S to be a stmt
   that could alter control flow.  */
static inline void
gimple_call_set_ctrl_altering (gimple s, bool ctrl_altering_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (ctrl_altering_p)
    s->subcode |= GF_CALL_CTRL_ALTERING;
  else
    s->subcode &= ~GF_CALL_CTRL_ALTERING;
}
/* Return true if call GS calls an func whose GF_CALL_CTRL_ALTERING
   flag is set.  Such call could not be a stmt in the middle of a bb.  */
static inline bool
gimple_call_ctrl_altering_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return (gs->subcode & GF_CALL_CTRL_ALTERING) != 0;
}
/* Return the function type of the function called by GS, or NULL_TREE
   for internal calls (which store internal_fn in the same slot).  */
static inline tree
gimple_call_fntype (const_gimple gs)
{
  const gimple_statement_call *call_stmt =
    as_a <const gimple_statement_call> (gs);
  if (gimple_call_internal_p (gs))
    return NULL_TREE;
  return call_stmt->u.fntype;
}
/* Set the type of the function called by GS to FNTYPE.  Not valid for
   internal calls.  */
static inline void
gimple_call_set_fntype (gimple gs, tree fntype)
{
  gimple_statement_call *call_stmt = as_a <gimple_statement_call> (gs);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  call_stmt->u.fntype = fntype;
}
/* Return the tree node representing the function called by call
   statement GS.  */
static inline tree
gimple_call_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 1);
}
/* Return a pointer to the tree node representing the function called by call
   statement GS.  */
static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 1);
}
/* Set FN to be the function called by call statement GS.  Not valid
   for internal calls.  */
static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, fn);
}
/* Set FNDECL to be the function called by call statement GS.  The
   callee operand stores the address of DECL, not DECL itself.  */
static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}
/* Set internal function FN to be the function called by call statement GS.  */
static inline void
gimple_call_set_internal_fn (gimple gs, enum internal_fn fn)
{
  gimple_statement_call *call_stmt = as_a <gimple_statement_call> (gs);
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  call_stmt->u.internal_fn = fn;
}
/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
   Otherwise return NULL.  This function is analogous to
   get_callee_fndecl in tree land.  */
static inline tree
gimple_call_fndecl (const_gimple gs)
{
  return gimple_call_addr_fndecl (gimple_call_fn (gs));
}
/* Return the type returned by call statement GS.  */
static inline tree
gimple_call_return_type (const_gimple gs)
{
  tree type = gimple_call_fntype (gs);
  /* No fntype (e.g. an internal call): fall back to the LHS type.  */
  if (type == NULL_TREE)
    return TREE_TYPE (gimple_call_lhs (gs));
  /* The type returned by a function is the type of its
     function type.  */
  return TREE_TYPE (type);
}
/* Return the static chain for call statement GS.  */
static inline tree
gimple_call_chain (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 2);
}
/* Return a pointer to the static chain for call statement GS.  */
static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 2);
}
/* Set CHAIN to be the static chain for call statement GS.  */
static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 2, chain);
}
/* Return the number of arguments used by call statement GS.  Operands
   0, 1 and 2 hold the LHS, callee and static chain, so the arguments
   occupy operands 3 and up.  */
static inline unsigned
gimple_call_num_args (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  num_ops = gimple_num_ops (gs);
  return num_ops - 3;
}
/* Return the argument at position INDEX for call statement GS.  */
static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, index + 3);
}
/* Return a pointer to the argument at position INDEX for call
   statement GS.  */
static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, index + 3);
}
/* Set ARG to be the argument at position INDEX for call statement GS.  */
static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, index + 3, arg);
}
/* If TAIL_P is true, mark call statement S as being a tail call
   (i.e., a call just before the exit of a function).  These calls are
   candidate for tail call optimization.  */
static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (tail_p)
    s->subcode |= GF_CALL_TAILCALL;
  else
    s->subcode &= ~GF_CALL_TAILCALL;
}
/* Return true if GIMPLE_CALL S is marked as a tail call.  */
static inline bool
gimple_call_tail_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_TAILCALL) != 0;
}
/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
   slot optimization.  This transformation uses the target of the call
   expansion as the return slot for calls that return in memory.  */
static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (return_slot_opt_p)
    s->subcode |= GF_CALL_RETURN_SLOT_OPT;
  else
    s->subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}
/* Return true if S is marked for return slot optimization.  */
static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}
/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
   thunk to the thunked-to function.  */
static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (from_thunk_p)
    s->subcode |= GF_CALL_FROM_THUNK;
  else
    s->subcode &= ~GF_CALL_FROM_THUNK;
}
/* Return true if GIMPLE_CALL S is a jump from a thunk.  */
static inline bool
gimple_call_from_thunk_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_FROM_THUNK) != 0;
}
/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (pass_arg_pack_p)
    s->subcode |= GF_CALL_VA_ARG_PACK;
  else
    s->subcode &= ~GF_CALL_VA_ARG_PACK;
}
/* Return true if GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_VA_ARG_PACK) != 0;
}
/* Return true if S is a noreturn call.  Unlike the flag accessors
   above, this queries the call's computed ECF flags.  */
static inline bool
gimple_call_noreturn_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}
/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
   even if the called function can throw in other cases.  */
static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (nothrow_p)
    s->subcode |= GF_CALL_NOTHROW;
  else
    s->subcode &= ~GF_CALL_NOTHROW;
}
/* Return true if S is a nothrow call.  */
static inline bool
gimple_call_nothrow_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}
/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
   is known to be emitted for VLA objects.  Those are wrapped by
   stack_save/stack_restore calls and hence can't lead to unbounded
   stack growth even when they occur in loops.  */
static inline void
gimple_call_set_alloca_for_var (gimple s, bool for_var)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (for_var)
    s->subcode |= GF_CALL_ALLOCA_FOR_VAR;
  else
    s->subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
}
/* Return true if S is a call to builtin_alloca emitted for VLA objects.  */
static inline bool
gimple_call_alloca_for_var_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}
/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  */
static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
  GIMPLE_CHECK (dest_call, GIMPLE_CALL);
  GIMPLE_CHECK (orig_call, GIMPLE_CALL);
  dest_call->subcode = orig_call->subcode;
}
/* Return a pointer to the points-to solution for the set of call-used
   variables of the call CALL.  */
static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
  gimple_statement_call *call_stmt = as_a <gimple_statement_call> (call);
  return &call_stmt->call_used;
}
/* Return a pointer to the points-to solution for the set of
   call-clobbered variables of the call CALL.  */
static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
  gimple_statement_call *call_stmt = as_a <gimple_statement_call> (call);
  return &call_stmt->call_clobbered;
}
/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
   non-NULL lhs.  */
static inline bool
gimple_has_lhs (gimple stmt)
{
  return (is_gimple_assign (stmt)
          || (is_gimple_call (stmt)
              && gimple_call_lhs (stmt) != NULL_TREE));
}
/* Return the code of the predicate computed by conditional statement GS.  */
static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return (enum tree_code) gs->subcode;
}
/* Set CODE to be the predicate code for the conditional statement GS.  */
static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gs->subcode = code;
}
/* Return the LHS of the predicate computed by conditional statement GS.
   Operands 0 and 1 are the predicate operands; 2 and 3 the true/false
   labels.  */
static inline tree
gimple_cond_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 0);
}
/* Return the pointer to the LHS of the predicate computed by conditional
   statement GS.  */
static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 0, lhs);
}
/* Return the RHS operand of the predicate computed by conditional GS.  */
static inline tree
gimple_cond_rhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 1);
}
/* Return the pointer to the RHS operand of the predicate computed by
   conditional GS.  */
static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the RHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 1, rhs);
}
/* Return the label used by conditional statement GS when its
   predicate evaluates to true.  */
static inline tree
gimple_cond_true_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 2);
}
/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to true.  */
static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 2, label);
}
/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to false.  */
static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 3, label);
}
/* Return the label used by conditional statement GS when its
   predicate evaluates to false.  */
static inline tree
gimple_cond_false_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 3);
}
/* Set the conditional COND_STMT to be of the form 'if (1 == 0)'.  */
static inline void
gimple_cond_make_false (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_false_node);
  gs->subcode = EQ_EXPR;
}
/* Set the conditional COND_STMT to be of the form 'if (1 == 1)'.  */
static inline void
gimple_cond_make_true (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_true_node);
  gs->subcode = EQ_EXPR;
}
/* Check if conditional statement GS is of the form 'if (1 == 1)',
   'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)'.  */
static inline bool
gimple_cond_true_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);
  /* Both operands must be boolean constants for the condition to be
     statically known.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;
  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;
  if (code == EQ_EXPR)
    return lhs == rhs;
  if (code == NE_EXPR)
    return lhs != rhs;
  /* Any other comparison code is not recognized here.  */
  return false;
}
/* Check if conditional statement GS is of the form 'if (1 != 1)',
   'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)'.  */
static inline bool
gimple_cond_false_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);
  /* Both operands must be boolean constants for the condition to be
     statically known.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;
  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;
  if (code == EQ_EXPR)
    return lhs != rhs;
  if (code == NE_EXPR)
    return lhs == rhs;
  /* Any other comparison code is not recognized here.  */
  return false;
}
/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS.  */
static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs)
{
  gimple_cond_set_code (stmt, code);
  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, rhs);
}
/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS.  */
static inline tree
gimple_label_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  return gimple_op (gs, 0);
}
/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
   GS.  */
static inline void
gimple_label_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  gimple_set_op (gs, 0, label);
}
/* Return the destination of the unconditional jump GS.  */
static inline tree
gimple_goto_dest (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  return gimple_op (gs, 0);
}
/* Set DEST to be the destination of the unconditional jump GS.  */
static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  gimple_set_op (gs, 0, dest);
}
/* Return the variables declared in the GIMPLE_BIND statement GS.  */
static inline tree
gimple_bind_vars (const_gimple gs)
{
  const gimple_statement_bind *bind_stmt =
    as_a <const gimple_statement_bind> (gs);
  return bind_stmt->vars;
}
/* Set VARS to be the set of variables declared in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  bind_stmt->vars = vars;
}
/* Append VARS to the set of variables declared in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  bind_stmt->vars = chainon (bind_stmt->vars, vars);
}
/* Return a pointer to the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */
static inline gimple_seq *
gimple_bind_body_ptr (gimple gs)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  return &bind_stmt->body;
}
/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS.  */
static inline gimple_seq
gimple_bind_body (gimple gs)
{
  return *gimple_bind_body_ptr (gs);
}
/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  bind_stmt->body = seq;
}
/* Append a statement to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  gimple_seq_add_stmt (&bind_stmt->body, stmt);
}
/* Append a sequence of statements to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  gimple_seq_add_seq (&bind_stmt->body, seq);
}
/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
   GS.  This is analogous to the BIND_EXPR_BLOCK field in trees.  */
static inline tree
gimple_bind_block (const_gimple gs)
{
  const gimple_statement_bind *bind_stmt =
    as_a <const gimple_statement_bind> (gs);
  return bind_stmt->block;
}
/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
   statement GS.  BLOCK must be a BLOCK node or NULL_TREE.  */
static inline void
gimple_bind_set_block (gimple gs, tree block)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  gcc_gimple_checking_assert (block == NULL_TREE
			      || TREE_CODE (block) == BLOCK);
  bind_stmt->block = block;
}
/* GIMPLE_ASM operand accessors.  As the offsets below show, the flat
   operand vector of a GIMPLE_ASM stores the NO output operands first,
   then the NI input operands (at INDEX + NO), then the NC clobber
   operands (at INDEX + NI + NO).  */

/* Return the number of input operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  return asm_stmt->ni;
}

/* Return the number of output operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  return asm_stmt->no;
}

/* Return the number of clobber operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  return asm_stmt->nc;
}

/* Return the number of label operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  return asm_stmt->nl;
}

/* Return input operand INDEX of GIMPLE_ASM GS.  Inputs follow the
   outputs in the operand vector.  */

static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->ni);
  return gimple_op (gs, index + asm_stmt->no);
}

/* Return a pointer to input operand INDEX of GIMPLE_ASM GS.  */

static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->ni);
  return gimple_op_ptr (gs, index + asm_stmt->no);
}

/* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS.  Asm operands
   are represented as TREE_LIST nodes.  */

static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
  gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->ni
			      && TREE_CODE (in_op) == TREE_LIST);
  gimple_set_op (gs, index + asm_stmt->no, in_op);
}

/* Return output operand INDEX of GIMPLE_ASM GS.  Outputs occupy the
   first NO slots of the operand vector.  */

static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->no);
  return gimple_op (gs, index);
}

/* Return a pointer to output operand INDEX of GIMPLE_ASM GS.  */

static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->no);
  return gimple_op_ptr (gs, index);
}

/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS.  */

static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
  gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->no
			      && TREE_CODE (out_op) == TREE_LIST);
  gimple_set_op (gs, index, out_op);
}

/* Return clobber operand INDEX of GIMPLE_ASM GS.  Clobbers follow the
   outputs and inputs.  */

static inline tree
gimple_asm_clobber_op (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->nc);
  return gimple_op (gs, index + asm_stmt->ni + asm_stmt->no);
}

/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS.  */

static inline void
gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
{
  gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->nc
			      && TREE_CODE (clobber_op) == TREE_LIST);
  gimple_set_op (gs, index + asm_stmt->ni + asm_stmt->no, clobber_op);
}
/* Return label operand INDEX of GIMPLE_ASM GS.

   Labels are stored after all other operands: outputs occupy the first
   NO slots, inputs the next NI, clobbers the next NC, so label INDEX
   lives at flat position INDEX + NO + NI + NC.  The previous code
   omitted ASM_STMT->no, which only happened to work while an asm with
   labels could not also have outputs; with outputs present, labels
   would collide with the clobber operands.  */

static inline tree
gimple_asm_label_op (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->nl);
  return gimple_op (gs, index + asm_stmt->no + asm_stmt->ni + asm_stmt->nc);
}

/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS.  LABEL_OP
   must be a TREE_LIST node.  Uses the same flat position as
   gimple_asm_label_op (after outputs, inputs and clobbers).  */

static inline void
gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
{
  gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->nl
			      && TREE_CODE (label_op) == TREE_LIST);
  gimple_set_op (gs, index + asm_stmt->no + asm_stmt->ni + asm_stmt->nc,
		 label_op);
}
/* Return the string representing the assembly instruction in
   GIMPLE_ASM GS.  */

static inline const char *
gimple_asm_string (const_gimple gs)
{
  const gimple_statement_asm *asm_stmt =
    as_a <const gimple_statement_asm> (gs);
  return asm_stmt->string;
}

/* Return true if GS is an asm statement marked volatile.  */

static inline bool
gimple_asm_volatile_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->subcode & GF_ASM_VOLATILE) != 0;
}

/* If VOLATILE_P is true, mark asm statement GS as volatile, otherwise
   clear the volatile flag.  */

static inline void
gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (volatile_p)
    gs->subcode |= GF_ASM_VOLATILE;
  else
    gs->subcode &= ~GF_ASM_VOLATILE;
}

/* If INPUT_P is true, mark asm GS as an ASM_INPUT, otherwise clear the
   flag.  */

static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (input_p)
    gs->subcode |= GF_ASM_INPUT;
  else
    gs->subcode &= ~GF_ASM_INPUT;
}

/* Return true if asm GS is an ASM_INPUT.  */

static inline bool
gimple_asm_input_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->subcode & GF_ASM_INPUT) != 0;
}
/* Return the types handled by GIMPLE_CATCH statement GS.  */

static inline tree
gimple_catch_types (const_gimple gs)
{
  const gimple_statement_catch *catch_stmt =
    as_a <const gimple_statement_catch> (gs);
  return catch_stmt->types;
}

/* Return a pointer to the types handled by GIMPLE_CATCH statement GS.  */

static inline tree *
gimple_catch_types_ptr (gimple gs)
{
  gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
  return &catch_stmt->types;
}

/* Return a pointer to the GIMPLE sequence representing the body of
   the handler of GIMPLE_CATCH statement GS.  */

static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
  gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
  return &catch_stmt->handler;
}

/* Return the GIMPLE sequence representing the body of the handler of
   GIMPLE_CATCH statement GS.  */

static inline gimple_seq
gimple_catch_handler (gimple gs)
{
  return *gimple_catch_handler_ptr (gs);
}

/* Set T to be the set of types handled by GIMPLE_CATCH GS.  */

static inline void
gimple_catch_set_types (gimple gs, tree t)
{
  gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
  catch_stmt->types = t;
}

/* Set HANDLER to be the body of GIMPLE_CATCH GS.  */

static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
  gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
  catch_stmt->handler = handler;
}

/* Return the types handled by GIMPLE_EH_FILTER statement GS.  */

static inline tree
gimple_eh_filter_types (const_gimple gs)
{
  const gimple_statement_eh_filter *eh_filter_stmt =
    as_a <const gimple_statement_eh_filter> (gs);
  return eh_filter_stmt->types;
}

/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
   GS.  */

static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
  gimple_statement_eh_filter *eh_filter_stmt =
    as_a <gimple_statement_eh_filter> (gs);
  return &eh_filter_stmt->types;
}

/* Return a pointer to the sequence of statements to execute when the
   GIMPLE_EH_FILTER statement fails.  */

static inline gimple_seq *
gimple_eh_filter_failure_ptr (gimple gs)
{
  gimple_statement_eh_filter *eh_filter_stmt =
    as_a <gimple_statement_eh_filter> (gs);
  return &eh_filter_stmt->failure;
}

/* Return the sequence of statements to execute when the
   GIMPLE_EH_FILTER statement fails.  */

static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
  return *gimple_eh_filter_failure_ptr (gs);
}

/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
  gimple_statement_eh_filter *eh_filter_stmt =
    as_a <gimple_statement_eh_filter> (gs);
  eh_filter_stmt->types = types;
}

/* Set FAILURE to be the sequence of statements to execute on failure
   for GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
  gimple_statement_eh_filter *eh_filter_stmt =
    as_a <gimple_statement_eh_filter> (gs);
  eh_filter_stmt->failure = failure;
}

/* Get the function decl to be called by the MUST_NOT_THROW region.  */

static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
  gimple_statement_eh_mnt *eh_mnt_stmt = as_a <gimple_statement_eh_mnt> (gs);
  return eh_mnt_stmt->fndecl;
}

/* Set the function decl to be called by GS to DECL.  */

static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
  gimple_statement_eh_mnt *eh_mnt_stmt = as_a <gimple_statement_eh_mnt> (gs);
  eh_mnt_stmt->fndecl = decl;
}
/* GIMPLE_EH_ELSE accessors.  */

/* Return a pointer to the normal-path body of GIMPLE_EH_ELSE GS.  */

static inline gimple_seq *
gimple_eh_else_n_body_ptr (gimple gs)
{
  gimple_statement_eh_else *eh_else_stmt =
    as_a <gimple_statement_eh_else> (gs);
  return &eh_else_stmt->n_body;
}

/* Return the normal-path body of GIMPLE_EH_ELSE GS.  */

static inline gimple_seq
gimple_eh_else_n_body (gimple gs)
{
  return *gimple_eh_else_n_body_ptr (gs);
}

/* Return a pointer to the exception-path body of GIMPLE_EH_ELSE GS.  */

static inline gimple_seq *
gimple_eh_else_e_body_ptr (gimple gs)
{
  gimple_statement_eh_else *eh_else_stmt =
    as_a <gimple_statement_eh_else> (gs);
  return &eh_else_stmt->e_body;
}

/* Return the exception-path body of GIMPLE_EH_ELSE GS.  */

static inline gimple_seq
gimple_eh_else_e_body (gimple gs)
{
  return *gimple_eh_else_e_body_ptr (gs);
}

/* Set SEQ as the normal-path body of GIMPLE_EH_ELSE GS.  */

static inline void
gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
{
  gimple_statement_eh_else *eh_else_stmt =
    as_a <gimple_statement_eh_else> (gs);
  eh_else_stmt->n_body = seq;
}

/* Set SEQ as the exception-path body of GIMPLE_EH_ELSE GS.  */

static inline void
gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
{
  gimple_statement_eh_else *eh_else_stmt =
    as_a <gimple_statement_eh_else> (gs);
  eh_else_stmt->e_body = seq;
}
/* GIMPLE_TRY accessors.  */

/* Return the kind of try block represented by GIMPLE_TRY GS.  This is
   either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY.  */

static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return (enum gimple_try_flags) (gs->subcode & GIMPLE_TRY_KIND);
}

/* Set the kind of try block represented by GIMPLE_TRY GS.  */

static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
			      || kind == GIMPLE_TRY_FINALLY);
  /* Changing the kind also drops any other subcode flags.  */
  if (gimple_try_kind (gs) != kind)
    gs->subcode = (unsigned int) kind;
}

/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  Only valid on a
   GIMPLE_TRY_CATCH statement.  */

static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
  return (gs->subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}

/* Return a pointer to the sequence of statements used as the
   body for GIMPLE_TRY GS.  */

static inline gimple_seq *
gimple_try_eval_ptr (gimple gs)
{
  gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
  return &try_stmt->eval;
}

/* Return the sequence of statements used as the body for GIMPLE_TRY GS.  */

static inline gimple_seq
gimple_try_eval (gimple gs)
{
  return *gimple_try_eval_ptr (gs);
}

/* Return a pointer to the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */

static inline gimple_seq *
gimple_try_cleanup_ptr (gimple gs)
{
  gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
  return &try_stmt->cleanup;
}

/* Return the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */

static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
  return *gimple_try_cleanup_ptr (gs);
}

/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  Only valid on a
   GIMPLE_TRY_CATCH statement.  */

static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
  gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
  if (catch_is_cleanup)
    g->subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
  else
    g->subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}

/* Set EVAL to be the sequence of statements to use as the body for
   GIMPLE_TRY GS.  */

static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
  gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
  try_stmt->eval = eval;
}

/* Set CLEANUP to be the sequence of statements to use as the cleanup
   body for GIMPLE_TRY GS.  */

static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
  gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
  try_stmt->cleanup = cleanup;
}

/* Return a pointer to the cleanup sequence for cleanup statement GS.  */

static inline gimple_seq *
gimple_wce_cleanup_ptr (gimple gs)
{
  gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce> (gs);
  return &wce_stmt->cleanup;
}

/* Return the cleanup sequence for cleanup statement GS.  */

static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
  return *gimple_wce_cleanup_ptr (gs);
}

/* Set CLEANUP to be the cleanup sequence for GS.  */

static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
  gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce> (gs);
  wce_stmt->cleanup = cleanup;
}

/* Return the CLEANUP_EH_ONLY flag for a WCE tuple.  The flag is kept
   directly in the subcode.  */

static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  return gs->subcode != 0;
}

/* Set the CLEANUP_EH_ONLY flag for a WCE tuple.  */

static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  gs->subcode = (unsigned int) eh_only_p;
}
/* Return the maximum number of arguments supported by GIMPLE_PHI GS,
   i.e. the number of argument slots allocated in the statement.  */

static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
  const gimple_statement_phi *phi_stmt =
    as_a <const gimple_statement_phi> (gs);
  return phi_stmt->capacity;
}

/* Return the number of arguments in GIMPLE_PHI GS.  This must always
   be exactly the number of incoming edges for the basic block holding
   GS.  */

static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
  const gimple_statement_phi *phi_stmt =
    as_a <const gimple_statement_phi> (gs);
  return phi_stmt->nargs;
}

/* Return the SSA name created by GIMPLE_PHI GS.  */

static inline tree
gimple_phi_result (const_gimple gs)
{
  const gimple_statement_phi *phi_stmt =
    as_a <const gimple_statement_phi> (gs);
  return phi_stmt->result;
}

/* Return a pointer to the SSA name created by GIMPLE_PHI GS.  */

static inline tree *
gimple_phi_result_ptr (gimple gs)
{
  gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
  return &phi_stmt->result;
}

/* Set RESULT to be the SSA name created by GIMPLE_PHI GS.  When RESULT
   is an SSA name, also record GS as its defining statement.  */

static inline void
gimple_phi_set_result (gimple gs, tree result)
{
  gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
  phi_stmt->result = result;
  if (result && TREE_CODE (result) == SSA_NAME)
    SSA_NAME_DEF_STMT (result) = gs;
}
/* Return the PHI argument corresponding to incoming edge INDEX for
   GIMPLE_PHI GS.  INDEX may point at a reserved-but-unused slot
   (INDEX >= nargs) but must stay within the allocated capacity.  */

static inline struct phi_arg_d *
gimple_phi_arg (gimple gs, unsigned index)
{
  gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
  /* args[] holds exactly CAPACITY slots, so INDEX == CAPACITY is
     already out of bounds; the previous "<=" check was off by one.  */
  gcc_gimple_checking_assert (index < phi_stmt->capacity);
  return &(phi_stmt->args[index]);
}
/* Set PHIARG to be the argument corresponding to incoming edge INDEX
   for GIMPLE_PHI GS.  Copies *PHIARG into the statement's argument
   slot; PHIARG itself is not retained.  */

static inline void
gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg)
{
  gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
  /* Only the first NARGS slots are in use; the previous "<=" check was
     off by one and permitted writing one slot past the live arguments
     (out of bounds when nargs == capacity).  */
  gcc_gimple_checking_assert (index < phi_stmt->nargs);
  phi_stmt->args[index] = *phiarg;
}
/* Return the PHI nodes for basic block BB, or NULL if there are no
   PHI nodes.  Only valid while BB is in GIMPLE form (not BB_RTL).  */

static inline gimple_seq
phi_nodes (const_basic_block bb)
{
  gcc_checking_assert (!(bb->flags & BB_RTL));
  return bb->il.gimple.phi_nodes;
}

/* Return a pointer to the PHI nodes for basic block BB.  */

static inline gimple_seq *
phi_nodes_ptr (basic_block bb)
{
  gcc_checking_assert (!(bb->flags & BB_RTL));
  return &bb->il.gimple.phi_nodes;
}

/* Return the tree operand for argument I of PHI node GS.  */

static inline tree
gimple_phi_arg_def (gimple gs, size_t index)
{
  return gimple_phi_arg (gs, index)->def;
}

/* Return a pointer to the tree operand for argument I of PHI node GS.  */

static inline tree *
gimple_phi_arg_def_ptr (gimple gs, size_t index)
{
  return &gimple_phi_arg (gs, index)->def;
}

/* Return the edge associated with argument I of phi node GS.  PHI
   arguments are indexed by the destination index of the incoming
   edges.  */

static inline edge
gimple_phi_arg_edge (gimple gs, size_t i)
{
  return EDGE_PRED (gimple_bb (gs), i);
}

/* Return the source location of gimple argument I of phi node GS.  */

static inline source_location
gimple_phi_arg_location (gimple gs, size_t i)
{
  return gimple_phi_arg (gs, i)->locus;
}

/* Return the source location of the argument on edge E of phi node GS.  */

static inline source_location
gimple_phi_arg_location_from_edge (gimple gs, edge e)
{
  return gimple_phi_arg (gs, e->dest_idx)->locus;
}

/* Set the source location of gimple argument I of phi node GS to LOC.  */

static inline void
gimple_phi_arg_set_location (gimple gs, size_t i, source_location loc)
{
  gimple_phi_arg (gs, i)->locus = loc;
}

/* Return TRUE if argument I of phi node GS has a location record.  */

static inline bool
gimple_phi_arg_has_location (gimple gs, size_t i)
{
  return gimple_phi_arg_location (gs, i) != UNKNOWN_LOCATION;
}
/* Return the region number for GIMPLE_RESX GS.  */

static inline int
gimple_resx_region (const_gimple gs)
{
  const gimple_statement_resx *resx_stmt =
    as_a <const gimple_statement_resx> (gs);
  return resx_stmt->region;
}

/* Set REGION to be the region number for GIMPLE_RESX GS.  */

static inline void
gimple_resx_set_region (gimple gs, int region)
{
  gimple_statement_resx *resx_stmt = as_a <gimple_statement_resx> (gs);
  resx_stmt->region = region;
}

/* Return the region number for GIMPLE_EH_DISPATCH GS.  */

static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
  const gimple_statement_eh_dispatch *eh_dispatch_stmt =
    as_a <const gimple_statement_eh_dispatch> (gs);
  return eh_dispatch_stmt->region;
}

/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS.  */

static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
  gimple_statement_eh_dispatch *eh_dispatch_stmt =
    as_a <gimple_statement_eh_dispatch> (gs);
  eh_dispatch_stmt->region = region;
}
/* Return the number of labels associated with the switch statement GS.
   Operand 0 is the index expression, so the labels are the remaining
   operands.  */

static inline unsigned
gimple_switch_num_labels (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  num_ops = gimple_num_ops (gs);
  gcc_gimple_checking_assert (num_ops > 1);
  return num_ops - 1;
}

/* Set NLABELS to be the number of labels for the switch statement GS.  */

static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
  GIMPLE_CHECK (g, GIMPLE_SWITCH);
  gimple_set_num_ops (g, nlabels + 1);
}

/* Return the index variable used by the switch statement GS.  */

static inline tree
gimple_switch_index (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op (gs, 0);
}

/* Return a pointer to the index variable for the switch statement GS.  */

static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op_ptr (gs, 0);
}

/* Set INDEX to be the index variable for switch statement GS.  INDEX
   must be an SSA variable or a constant.  */

static inline void
gimple_switch_set_index (gimple gs, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
  gimple_set_op (gs, 0, index);
}

/* Return the label numbered INDEX.  The default label is 0, followed by any
   labels in a switch statement.  */

static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
  return gimple_op (gs, index + 1);
}

/* Set the label number INDEX to LABEL.  0 is always the default label.  */

static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
			      && (label == NULL_TREE
				  || TREE_CODE (label) == CASE_LABEL_EXPR));
  gimple_set_op (gs, index + 1, label);
}

/* Return the default label for a switch statement.  The default label
   is recognizable by having neither a CASE_LOW nor a CASE_HIGH.  */

static inline tree
gimple_switch_default_label (const_gimple gs)
{
  tree label = gimple_switch_label (gs, 0);
  gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
  return label;
}

/* Set the default label for a switch statement.  */

static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
  gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
  gimple_switch_set_label (gs, 0, label);
}
/* Return true if GS is a GIMPLE_DEBUG statement.  */

static inline bool
is_gimple_debug (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_DEBUG;
}

/* Return true if S is a GIMPLE_DEBUG BIND statement.  */

static inline bool
gimple_debug_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->subcode == GIMPLE_DEBUG_BIND;
  return false;
}

/* Return the variable bound in a GIMPLE_DEBUG bind statement
   (operand 0).  */

static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 0);
}

/* Return the value bound to the variable in a GIMPLE_DEBUG bind
   statement (operand 1).  */

static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1);
}

/* Return a pointer to the value bound to the variable in a
   GIMPLE_DEBUG bind statement.  */

static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}

/* Set the variable bound in a GIMPLE_DEBUG bind statement.  */

static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}

/* Set the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}

/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
   optimized away.  */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */

/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}

/* Return true if the GIMPLE_DEBUG bind statement is bound to a
   value.  */

static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}

#undef GIMPLE_DEBUG_BIND_NOVALUE

/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement.  */

static inline bool
gimple_debug_source_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->subcode == GIMPLE_DEBUG_SOURCE_BIND;
  return false;
}

/* Return the variable bound in a GIMPLE_DEBUG source bind statement.  */

static inline tree
gimple_debug_source_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 0);
}

/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.  */

static inline tree
gimple_debug_source_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 1);
}

/* Return a pointer to the value bound to the variable in a
   GIMPLE_DEBUG source bind statement.  */

static inline tree *
gimple_debug_source_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}

/* Set the variable bound in a GIMPLE_DEBUG source bind statement.  */

static inline void
gimple_debug_source_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}

/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.  */

static inline void
gimple_debug_source_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}

/* Return the line number for STMT, or return -1 if we have no line
   number information for it (no statement or unknown location).  */

static inline int
get_lineno (const_gimple stmt)
{
  location_t loc;

  if (!stmt)
    return -1;

  loc = gimple_location (stmt);
  if (loc == UNKNOWN_LOCATION)
    return -1;

  return LOCATION_LINE (loc);
}
/* Return a pointer to the body for the OMP statement GS.  All OMP
   statements derive from gimple_statement_omp, so a static cast
   suffices here.  */

static inline gimple_seq *
gimple_omp_body_ptr (gimple gs)
{
  return &static_cast <gimple_statement_omp *> (gs)->body;
}

/* Return the body for the OMP statement GS.  */

static inline gimple_seq
gimple_omp_body (gimple gs)
{
  return *gimple_omp_body_ptr (gs);
}

/* Set BODY to be the body for the OMP statement GS.  */

static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
  static_cast <gimple_statement_omp *> (gs)->body = body;
}

/* Return the name associated with OMP_CRITICAL statement GS.  */

static inline tree
gimple_omp_critical_name (const_gimple gs)
{
  const gimple_statement_omp_critical *omp_critical_stmt =
    as_a <const gimple_statement_omp_critical> (gs);
  return omp_critical_stmt->name;
}

/* Return a pointer to the name associated with OMP critical statement GS.  */

static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
  gimple_statement_omp_critical *omp_critical_stmt =
    as_a <gimple_statement_omp_critical> (gs);
  return &omp_critical_stmt->name;
}

/* Set NAME to be the name associated with OMP critical statement GS.  */

static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
  gimple_statement_omp_critical *omp_critical_stmt =
    as_a <gimple_statement_omp_critical> (gs);
  omp_critical_stmt->name = name;
}
/* Return the kind of the OMP for statement G.  */

static inline int
gimple_omp_for_kind (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  return (gimple_omp_subcode (g) & GF_OMP_FOR_KIND_MASK);
}

/* Set the OMP for kind, preserving the other subcode flags.  */

static inline void
gimple_omp_for_set_kind (gimple g, int kind)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  g->subcode = (g->subcode & ~GF_OMP_FOR_KIND_MASK)
	       | (kind & GF_OMP_FOR_KIND_MASK);
}

/* Return true if OMP for statement G has the
   GF_OMP_FOR_COMBINED flag set.  */

static inline bool
gimple_omp_for_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  return (gimple_omp_subcode (g) & GF_OMP_FOR_COMBINED) != 0;
}

/* Set the GF_OMP_FOR_COMBINED field in G depending on the boolean
   value of COMBINED_P.  */

static inline void
gimple_omp_for_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  if (combined_p)
    g->subcode |= GF_OMP_FOR_COMBINED;
  else
    g->subcode &= ~GF_OMP_FOR_COMBINED;
}

/* Return true if OMP for statement G has the
   GF_OMP_FOR_COMBINED_INTO flag set.  */

static inline bool
gimple_omp_for_combined_into_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  return (gimple_omp_subcode (g) & GF_OMP_FOR_COMBINED_INTO) != 0;
}

/* Set the GF_OMP_FOR_COMBINED_INTO field in G depending on the boolean
   value of COMBINED_P.  */

static inline void
gimple_omp_for_set_combined_into_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  if (combined_p)
    g->subcode |= GF_OMP_FOR_COMBINED_INTO;
  else
    g->subcode &= ~GF_OMP_FOR_COMBINED_INTO;
}

/* Return the clauses associated with OMP_FOR GS.  */

static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
  const gimple_statement_omp_for *omp_for_stmt =
    as_a <const gimple_statement_omp_for> (gs);
  return omp_for_stmt->clauses;
}

/* Return a pointer to the clauses associated with OMP_FOR GS.  */

static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
  gimple_statement_omp_for *omp_for_stmt =
    as_a <gimple_statement_omp_for> (gs);
  return &omp_for_stmt->clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS.  */

static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
  gimple_statement_omp_for *omp_for_stmt =
    as_a <gimple_statement_omp_for> (gs);
  omp_for_stmt->clauses = clauses;
}
/* Get the collapse count of OMP_FOR GS. */
static inline size_t
gimple_omp_for_collapse (gimple gs)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
return omp_for_stmt->collapse;
}
/* Return the index variable for OMP_FOR GS. */
static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return omp_for_stmt->iter[i].index;
}
/* Return a pointer to the index variable for OMP_FOR GS. */
static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return &omp_for_stmt->iter[i].index;
}
/* Set INDEX to be the index variable for OMP_FOR GS. */
static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
omp_for_stmt->iter[i].index = index;
}
/* Return the initial value of the I-th collapsed loop of OMP_FOR GS.  */
static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return omp_for_stmt->iter[i].initial;
}
/* Return a pointer to the initial value of the I-th collapsed loop of
OMP_FOR GS.  */
static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return &omp_for_stmt->iter[i].initial;
}
/* Set INITIAL to be the initial value of the I-th collapsed loop of
OMP_FOR GS.  */
static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
omp_for_stmt->iter[i].initial = initial;
}
/* Return the final (bound) value of the I-th collapsed loop of OMP_FOR GS.  */
static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return omp_for_stmt->iter[i].final;
}
/* Return a pointer to the final value of the I-th collapsed loop of
OMP_FOR GS.  */
static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return &omp_for_stmt->iter[i].final;
}
/* Set FINAL to be the final (bound) value of the I-th collapsed loop of
OMP_FOR GS.  */
static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
omp_for_stmt->iter[i].final = final;
}
/* Return the increment value of the I-th collapsed loop of OMP_FOR GS.  */
static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return omp_for_stmt->iter[i].incr;
}
/* Return a pointer to the increment value of the I-th collapsed loop of
OMP_FOR GS.  */
static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return &omp_for_stmt->iter[i].incr;
}
/* Set INCR to be the increment value of the I-th collapsed loop of
OMP_FOR GS.  */
static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
omp_for_stmt->iter[i].incr = incr;
}
/* Return a pointer to the sequence of statements to execute before the OMP_FOR
statement GS starts.  */
static inline gimple_seq *
gimple_omp_for_pre_body_ptr (gimple gs)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
return &omp_for_stmt->pre_body;
}
/* Return the sequence of statements to execute before the OMP_FOR
statement GS starts.  Implemented via the _ptr accessor so both share
one checked downcast.  */
static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
return *gimple_omp_for_pre_body_ptr (gs);
}
/* Set PRE_BODY to be the sequence of statements to execute before the
OMP_FOR statement GS starts.  */
static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
omp_for_stmt->pre_body = pre_body;
}
/* Return the clauses associated with OMP_PARALLEL GS.  */
static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
const gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <const gimple_statement_omp_parallel> (gs);
return omp_parallel_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL GS.  */
static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
return &omp_parallel_stmt->clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
GS.  */
static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
omp_parallel_stmt->clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL GS.  */
static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
const gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <const gimple_statement_omp_parallel> (gs);
return omp_parallel_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL GS.  */
static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
return &omp_parallel_stmt->child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL GS.  */
static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
omp_parallel_stmt->child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL GS.  */
static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
const gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <const gimple_statement_omp_parallel> (gs);
return omp_parallel_stmt->data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL GS.  */
static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
return &omp_parallel_stmt->data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS.  */
static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
gimple_statement_omp_parallel *omp_parallel_stmt =
as_a <gimple_statement_omp_parallel> (gs);
omp_parallel_stmt->data_arg = data_arg;
}
/* Return the clauses associated with OMP_TASK GS.  */
static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS.  */
static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS.  */
static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS.  */
static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS.  */
static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS.  */
static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS.  */
static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->data_arg = data_arg;
}
/* Return the clauses associated with the OMP taskreg statement GS, which
may be either a GIMPLE_OMP_PARALLEL or a GIMPLE_OMP_TASK.  */
static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
const gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <const gimple_statement_omp_taskreg> (gs);
return omp_taskreg_stmt->clauses;
}
/* Return a pointer to the clauses associated with the OMP taskreg
statement GS (GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK).  */
static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
return &omp_taskreg_stmt->clauses;
}
/* Set CLAUSES to be the list of clauses associated with the OMP taskreg
statement GS (GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK).  */
static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
omp_taskreg_stmt->clauses = clauses;
}
/* Return the child function used to hold the body of the OMP taskreg
statement GS (GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK).  */
static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
const gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <const gimple_statement_omp_taskreg> (gs);
return omp_taskreg_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of the
OMP taskreg statement GS (GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK).  */
static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
return &omp_taskreg_stmt->child_fn;
}
/* Set CHILD_FN to be the child function for the OMP taskreg statement GS
(GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK).  */
static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
omp_taskreg_stmt->child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in the OMP taskreg statement GS
(GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK).  */
static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
const gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <const gimple_statement_omp_taskreg> (gs);
return omp_taskreg_stmt->data_arg;
}
/* Return a pointer to the data argument for the OMP taskreg statement GS
(GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK).  */
static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
return &omp_taskreg_stmt->data_arg;
}
/* Set DATA_ARG to be the data argument for the OMP taskreg statement GS
(GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK).  */
static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt =
as_a <gimple_statement_omp_taskreg> (gs);
omp_taskreg_stmt->data_arg = data_arg;
}
/* Return the copy function of OMP_TASK GS.  */
static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->copy_fn;
}
/* Return a pointer to the copy function of OMP_TASK GS.  */
static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->copy_fn;
}
/* Set COPY_FN to be the copy function for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->copy_fn = copy_fn;
}
/* Return size of the data block in bytes in OMP_TASK GS.  */
static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->arg_size;
}
/* Return a pointer to the data block size for OMP_TASK GS.  */
static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->arg_size;
}
/* Set ARG_SIZE to be the data block size for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->arg_size = arg_size;
}
/* Return alignment of the data block in bytes in OMP_TASK GS.  */
static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
const gimple_statement_omp_task *omp_task_stmt =
as_a <const gimple_statement_omp_task> (gs);
return omp_task_stmt->arg_align;
}
/* Return a pointer to the data block alignment for OMP_TASK GS.  */
static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
return &omp_task_stmt->arg_align;
}
/* Set ARG_ALIGN to be the data block alignment for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
gimple_statement_omp_task *omp_task_stmt =
as_a <gimple_statement_omp_task> (gs);
omp_task_stmt->arg_align = arg_align;
}
/* Return the clauses associated with OMP_SINGLE GS.  */
static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
const gimple_statement_omp_single *omp_single_stmt =
as_a <const gimple_statement_omp_single> (gs);
return omp_single_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_SINGLE GS.  */
static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
gimple_statement_omp_single *omp_single_stmt =
as_a <gimple_statement_omp_single> (gs);
return &omp_single_stmt->clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS.  */
static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_single *omp_single_stmt =
as_a <gimple_statement_omp_single> (gs);
omp_single_stmt->clauses = clauses;
}
/* Return the clauses associated with OMP_TARGET GS.  */
static inline tree
gimple_omp_target_clauses (const_gimple gs)
{
const gimple_statement_omp_target *omp_target_stmt =
as_a <const gimple_statement_omp_target> (gs);
return omp_target_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_TARGET GS.  */
static inline tree *
gimple_omp_target_clauses_ptr (gimple gs)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
return &omp_target_stmt->clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_TARGET GS.  */
static inline void
gimple_omp_target_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
omp_target_stmt->clauses = clauses;
}
/* Return the kind of the OMP target statement G, extracted from the
subcode bits reserved by GF_OMP_TARGET_KIND_MASK.  */
static inline int
gimple_omp_target_kind (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_TARGET);
return (gimple_omp_subcode (g) & GF_OMP_TARGET_KIND_MASK);
}
/* Set the OMP target kind of G to KIND, preserving the other subcode
bits outside GF_OMP_TARGET_KIND_MASK.  */
static inline void
gimple_omp_target_set_kind (gimple g, int kind)
{
GIMPLE_CHECK (g, GIMPLE_OMP_TARGET);
g->subcode = (g->subcode & ~GF_OMP_TARGET_KIND_MASK)
| (kind & GF_OMP_TARGET_KIND_MASK);
}
/* Return the child function used to hold the body of OMP_TARGET GS.  */
static inline tree
gimple_omp_target_child_fn (const_gimple gs)
{
const gimple_statement_omp_target *omp_target_stmt =
as_a <const gimple_statement_omp_target> (gs);
return omp_target_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TARGET GS.  */
static inline tree *
gimple_omp_target_child_fn_ptr (gimple gs)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
return &omp_target_stmt->child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TARGET GS.  */
static inline void
gimple_omp_target_set_child_fn (gimple gs, tree child_fn)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
omp_target_stmt->child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TARGET GS.  */
static inline tree
gimple_omp_target_data_arg (const_gimple gs)
{
const gimple_statement_omp_target *omp_target_stmt =
as_a <const gimple_statement_omp_target> (gs);
return omp_target_stmt->data_arg;
}
/* Return a pointer to the data argument for OMP_TARGET GS.  */
static inline tree *
gimple_omp_target_data_arg_ptr (gimple gs)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
return &omp_target_stmt->data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TARGET GS.  */
static inline void
gimple_omp_target_set_data_arg (gimple gs, tree data_arg)
{
gimple_statement_omp_target *omp_target_stmt =
as_a <gimple_statement_omp_target> (gs);
omp_target_stmt->data_arg = data_arg;
}
/* Return the clauses associated with OMP_TEAMS GS.  */
static inline tree
gimple_omp_teams_clauses (const_gimple gs)
{
const gimple_statement_omp_teams *omp_teams_stmt =
as_a <const gimple_statement_omp_teams> (gs);
return omp_teams_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_TEAMS GS.  */
static inline tree *
gimple_omp_teams_clauses_ptr (gimple gs)
{
gimple_statement_omp_teams *omp_teams_stmt =
as_a <gimple_statement_omp_teams> (gs);
return &omp_teams_stmt->clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_TEAMS GS.  */
static inline void
gimple_omp_teams_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_teams *omp_teams_stmt =
as_a <gimple_statement_omp_teams> (gs);
omp_teams_stmt->clauses = clauses;
}
/* Return the clauses associated with OMP_SECTIONS GS.  */
static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
const gimple_statement_omp_sections *omp_sections_stmt =
as_a <const gimple_statement_omp_sections> (gs);
return omp_sections_stmt->clauses;
}
/* Return a pointer to the clauses associated with OMP_SECTIONS GS.  */
static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
gimple_statement_omp_sections *omp_sections_stmt =
as_a <gimple_statement_omp_sections> (gs);
return &omp_sections_stmt->clauses;
}
/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
GS.  */
static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
gimple_statement_omp_sections *omp_sections_stmt =
as_a <gimple_statement_omp_sections> (gs);
omp_sections_stmt->clauses = clauses;
}
/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
in GS.  */
static inline tree
gimple_omp_sections_control (const_gimple gs)
{
const gimple_statement_omp_sections *omp_sections_stmt =
as_a <const gimple_statement_omp_sections> (gs);
return omp_sections_stmt->control;
}
/* Return a pointer to the control variable associated with the
GIMPLE_OMP_SECTIONS GS.  */
static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
gimple_statement_omp_sections *omp_sections_stmt =
as_a <gimple_statement_omp_sections> (gs);
return &omp_sections_stmt->control;
}
/* Set CONTROL to be the control variable associated with the
GIMPLE_OMP_SECTIONS in GS.  */
static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
gimple_statement_omp_sections *omp_sections_stmt =
as_a <gimple_statement_omp_sections> (gs);
omp_sections_stmt->control = control;
}
/* Set COND to be the condition code of the I-th collapsed loop of
OMP_FOR GS.  COND must be a comparison code.  */
static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
gimple_statement_omp_for *omp_for_stmt =
as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
&& i < omp_for_stmt->collapse);
omp_for_stmt->iter[i].cond = cond;
}
/* Return the condition code of the I-th collapsed loop of OMP_FOR GS.  */
static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
const gimple_statement_omp_for *omp_for_stmt =
as_a <const gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
return omp_for_stmt->iter[i].cond;
}
/* Set the value being stored in a GIMPLE_OMP_ATOMIC_STORE G.  */
static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
as_a <gimple_statement_omp_atomic_store> (g);
omp_atomic_store_stmt->val = val;
}
/* Return the value being stored in a GIMPLE_OMP_ATOMIC_STORE G.  */
static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
const gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
as_a <const gimple_statement_omp_atomic_store> (g);
return omp_atomic_store_stmt->val;
}
/* Return a pointer to the value being stored in a GIMPLE_OMP_ATOMIC_STORE
G.  */
static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
as_a <gimple_statement_omp_atomic_store> (g);
return &omp_atomic_store_stmt->val;
}
/* Set the LHS of a GIMPLE_OMP_ATOMIC_LOAD G.  */
static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
as_a <gimple_statement_omp_atomic_load> (g);
omp_atomic_load_stmt->lhs = lhs;
}
/* Get the LHS of a GIMPLE_OMP_ATOMIC_LOAD G.  */
static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
const gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
as_a <const gimple_statement_omp_atomic_load> (g);
return omp_atomic_load_stmt->lhs;
}
/* Return a pointer to the LHS of a GIMPLE_OMP_ATOMIC_LOAD G.  */
static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
as_a <gimple_statement_omp_atomic_load> (g);
return &omp_atomic_load_stmt->lhs;
}
/* Set the RHS of a GIMPLE_OMP_ATOMIC_LOAD G.  */
static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
as_a <gimple_statement_omp_atomic_load> (g);
omp_atomic_load_stmt->rhs = rhs;
}
/* Get the RHS of a GIMPLE_OMP_ATOMIC_LOAD G.  */
static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
const gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
as_a <const gimple_statement_omp_atomic_load> (g);
return omp_atomic_load_stmt->rhs;
}
/* Return a pointer to the RHS of a GIMPLE_OMP_ATOMIC_LOAD G.  */
static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
as_a <gimple_statement_omp_atomic_load> (g);
return &omp_atomic_load_stmt->rhs;
}
/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
const gimple_statement_omp_continue *omp_continue_stmt =
as_a <const gimple_statement_omp_continue> (g);
return omp_continue_stmt->control_def;
}
/* Same as gimple_omp_continue_control_def, but return the address of the
control variable definition.  */
static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
gimple_statement_omp_continue *omp_continue_stmt =
as_a <gimple_statement_omp_continue> (g);
return &omp_continue_stmt->control_def;
}
/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
gimple_statement_omp_continue *omp_continue_stmt =
as_a <gimple_statement_omp_continue> (g);
omp_continue_stmt->control_def = def;
}
/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
const gimple_statement_omp_continue *omp_continue_stmt =
as_a <const gimple_statement_omp_continue> (g);
return omp_continue_stmt->control_use;
}
/* Same as gimple_omp_continue_control_use, but return the address of the
control variable use.  */
static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
gimple_statement_omp_continue *omp_continue_stmt =
as_a <gimple_statement_omp_continue> (g);
return &omp_continue_stmt->control_use;
}
/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
gimple_statement_omp_continue *omp_continue_stmt =
as_a <gimple_statement_omp_continue> (g);
omp_continue_stmt->control_use = use;
}
/* Return a pointer to the body for the GIMPLE_TRANSACTION statement GS.  */
static inline gimple_seq *
gimple_transaction_body_ptr (gimple gs)
{
gimple_statement_transaction *transaction_stmt =
as_a <gimple_statement_transaction> (gs);
return &transaction_stmt->body;
}
/* Return the body for the GIMPLE_TRANSACTION statement GS.  */
static inline gimple_seq
gimple_transaction_body (gimple gs)
{
return *gimple_transaction_body_ptr (gs);
}
/* Return the label associated with a GIMPLE_TRANSACTION.  */
static inline tree
gimple_transaction_label (const_gimple gs)
{
const gimple_statement_transaction *transaction_stmt =
as_a <const gimple_statement_transaction> (gs);
return transaction_stmt->label;
}
/* Return a pointer to the label associated with a GIMPLE_TRANSACTION.  */
static inline tree *
gimple_transaction_label_ptr (gimple gs)
{
gimple_statement_transaction *transaction_stmt =
as_a <gimple_statement_transaction> (gs);
return &transaction_stmt->label;
}
/* Return the subcode associated with a GIMPLE_TRANSACTION.  */
static inline unsigned int
gimple_transaction_subcode (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
return gs->subcode;
}
/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS.  */
static inline void
gimple_transaction_set_body (gimple gs, gimple_seq body)
{
gimple_statement_transaction *transaction_stmt =
as_a <gimple_statement_transaction> (gs);
transaction_stmt->body = body;
}
/* Set the label associated with a GIMPLE_TRANSACTION.  */
static inline void
gimple_transaction_set_label (gimple gs, tree label)
{
gimple_statement_transaction *transaction_stmt =
as_a <gimple_statement_transaction> (gs);
transaction_stmt->label = label;
}
/* Set the subcode associated with a GIMPLE_TRANSACTION.  */
static inline void
gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
gs->subcode = subcode;
}
/* Return a pointer to the return value for GIMPLE_RETURN GS.  The return
value is stored as operand 0 of the statement.  */
static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op_ptr (gs, 0);
}
/* Return the return value for GIMPLE_RETURN GS.  */
static inline tree
gimple_return_retval (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op (gs, 0);
}
/* Set RETVAL to be the return value for GIMPLE_RETURN GS.  */
static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
gimple_set_op (gs, 0, retval);
}
/* Covers every GIMPLE_OMP_* statement code; keep this list in sync with
the gimple code definitions when new OMP statement kinds are added.  */
#define CASE_GIMPLE_OMP \
case GIMPLE_OMP_PARALLEL: \
case GIMPLE_OMP_TASK: \
case GIMPLE_OMP_FOR: \
case GIMPLE_OMP_SECTIONS: \
case GIMPLE_OMP_SECTIONS_SWITCH: \
case GIMPLE_OMP_SINGLE: \
case GIMPLE_OMP_TARGET: \
case GIMPLE_OMP_TEAMS: \
case GIMPLE_OMP_SECTION: \
case GIMPLE_OMP_MASTER: \
case GIMPLE_OMP_TASKGROUP: \
case GIMPLE_OMP_ORDERED: \
case GIMPLE_OMP_CRITICAL: \
case GIMPLE_OMP_RETURN: \
case GIMPLE_OMP_ATOMIC_LOAD: \
case GIMPLE_OMP_ATOMIC_STORE: \
case GIMPLE_OMP_CONTINUE
/* Returns true when the gimple statement STMT is any of the OpenMP types.  */
static inline bool
is_gimple_omp (const_gimple stmt)
{
switch (gimple_code (stmt))
{
CASE_GIMPLE_OMP:
return true;
default:
return false;
}
}
/* Returns TRUE if statement G is a GIMPLE_NOP.  */
static inline bool
gimple_nop_p (const_gimple g)
{
return gimple_code (g) == GIMPLE_NOP;
}
/* Return true if GS is a GIMPLE_RESX (resumption of exception
propagation).  */
static inline bool
is_gimple_resx (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_RESX;
}
/* Return the predictor of GIMPLE_PREDICT statement GS.  The predictor and
the taken/not-taken outcome share the subcode field; the outcome lives in
the GF_PREDICT_TAKEN bit and is masked out here.  */
static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (enum br_predictor) (gs->subcode & ~GF_PREDICT_TAKEN);
}
/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICTOR,
preserving the outcome bit.  */
static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
gs->subcode = (gs->subcode & GF_PREDICT_TAKEN)
| (unsigned) predictor;
}
/* Return the outcome of GIMPLE_PREDICT statement GS.  */
static inline enum prediction
gimple_predict_outcome (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (gs->subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}
/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME, preserving
the predictor bits.  */
static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
if (outcome == TAKEN)
gs->subcode |= GF_PREDICT_TAKEN;
else
gs->subcode &= ~GF_PREDICT_TAKEN;
}
/* Return the type of the main expression computed by STMT.  Return
void_type_node if the statement computes nothing.  */
static inline tree
gimple_expr_type (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
{
tree type;
/* In general we want to pass out a type that can be substituted
for both the RHS and the LHS types if there is a possibly
useless conversion involved.  That means returning the
original RHS type as far as we can reconstruct it.  */
if (code == GIMPLE_CALL)
{
/* An internal MASK_STORE has no LHS; use the type of the value
being stored (argument 3) instead of the return type.  */
if (gimple_call_internal_p (stmt)
&& gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
type = TREE_TYPE (gimple_call_arg (stmt, 3));
else
type = gimple_call_return_type (stmt);
}
else
switch (gimple_assign_rhs_code (stmt))
{
case POINTER_PLUS_EXPR:
/* The result type is the pointer operand's type, not the
offset's.  */
type = TREE_TYPE (gimple_assign_rhs1 (stmt));
break;
default:
/* As fallback use the type of the LHS.  */
type = TREE_TYPE (gimple_get_lhs (stmt));
break;
}
return type;
}
else if (code == GIMPLE_COND)
return boolean_type_node;
else
return void_type_node;
}
/* Enum and arrays used for allocation stats.  Keep in sync with
gimple.c:gimple_alloc_kind_names.  */
enum gimple_alloc_kind
{
gimple_alloc_kind_assign, /* Assignments.  */
gimple_alloc_kind_phi, /* PHI nodes.  */
gimple_alloc_kind_cond, /* Conditionals.  */
gimple_alloc_kind_rest, /* Everything else.  */
gimple_alloc_kind_all
};
/* Per-kind allocation counters and byte totals, defined in gimple.c.  */
extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];
/* Return the allocation kind for a given stmt CODE.  */
static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
switch (code)
{
case GIMPLE_ASSIGN:
return gimple_alloc_kind_assign;
case GIMPLE_PHI:
return gimple_alloc_kind_phi;
case GIMPLE_COND:
return gimple_alloc_kind_cond;
default:
return gimple_alloc_kind_rest;
}
}
/* Return true if a location should not be emitted for this statement
by annotate_all_with_location.  The flag is stored in pass-local
flag GF_PLF_1.  */
static inline bool
gimple_do_not_emit_location_p (gimple g)
{
return gimple_plf (g, GF_PLF_1);
}
/* Mark statement G so a location will not be emitted by
annotate_one_with_location.  */
static inline void
gimple_set_do_not_emit_location (gimple g)
{
/* The PLF flags are initialized to 0 when a new tuple is created,
so no need to initialize it anywhere.  */
gimple_set_plf (g, GF_PLF_1, true);
}
/* Macros for showing usage statistics. */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
? (x) \
: ((x) < 1024*1024*10 \
? (x) / 1024 \
: (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? 'b' : ((x) < 1024*1024*10 ? 'k' : 'M'))
#endif /* GCC_GIMPLE_H */
|
ast-dump-openmp-master.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test(void) {
#pragma omp master
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-master.c:3:1, line:6:1> line:3:6 test 'void (void)'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:6:1>
// CHECK-NEXT: `-OMPMasterDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: `-NullStmt {{.*}} <line:5:3>
|
compute_landsat_refl.c | /******************************************************************************
FILE: compute_landsat_refl.c
PURPOSE: Contains functions for handling the Landsat 8/9 TOA reflectance and
surface reflectance corrections.
PROJECT: Land Satellites Data System Science Research and Development (LSRD)
at the USGS EROS
LICENSE TYPE: NASA Open Source Agreement Version 1.3
NOTES:
******************************************************************************/
#include "time.h"
#include "aero_interp.h"
#include "poly_coeff.h"
#include "read_level1_qa.h"
#include "read_level2_qa.h"
#define WRITE_TAERO 1
/******************************************************************************
MODULE: compute_landsat_toa_refl
PURPOSE: Computes the TOA reflectance and TOA brightness temps for all
the Landsat bands except the pan band. Uses a per-pixel solar zenith angle for
the TOA corrections.
RETURN VALUE:
Type = int
Value Description
----- -----------
ERROR Error computing the reflectance
SUCCESS No errors encountered
PROJECT: Land Satellites Data System Science Research and Development (LSRD)
at the USGS EROS
NOTES:
1. These TOA and BT algorithms match those as published by the USGS Landsat
team in http://landsat.usgs.gov/Landsat8_Using_Product.php
******************************************************************************/
int compute_landsat_toa_refl
(
    Input_t *input,      /* I: input structure for the Landsat product */
    Espa_internal_meta_t *xml_metadata,
                         /* I: XML metadata structure */
    uint16 *qaband,      /* I: QA band for the input image, nlines x nsamps */
    int nlines,          /* I: number of lines in reflectance, thermal bands */
    int nsamps,          /* I: number of samps in reflectance, thermal bands */
    char *instrument,    /* I: instrument to be processed (OLI, TIRS) */
    int16 *sza,          /* I: scaled per-pixel solar zenith angles (degrees),
                               nlines x nsamps */
    float **sband        /* O: output TOA reflectance and brightness temp
                               values (unscaled) */
)
{
    char errmsg[STR_SIZE];    /* error message */
    char FUNC_NAME[] = "compute_landsat_toa_refl"; /* function name */
    long i;                   /* looping variable for pixels; long so the
                                 index cannot wrap when nlines * nsamps
                                 exceeds INT_MAX */
    int ib;                   /* looping variable for input bands */
    int th_indx;              /* index of thermal band and K constants */
    int sband_ib;             /* index of current output band */
    int iband;                /* current input band */
    long npixels;             /* number of pixels to process */
    float rotoa;              /* top of atmosphere reflectance */
    float tmpf;               /* temporary floating point value */
    float refl_mult;          /* reflectance multiplier for bands 1-9 */
    float refl_add;           /* reflectance additive for bands 1-9 */
    float xcals;              /* radiance multiplier for bands 10 and 11 */
    float xcalo;              /* radiance additive for bands 10 and 11 */
    float k1;                 /* K1 temperature constant for thermal bands */
    float k2;                 /* K2 temperature constant for thermal bands */
    float xmus;               /* cosine of solar zenith angle (per-pixel) */
    float sza_mult;           /* sza gain value (for unscaling) */
    float sza_add;            /* sza offset value (for unscaling) */
    uint16 *uband = NULL;     /* array for input image data for a single band,
                                 nlines x nsamps */
    time_t mytime;            /* time variable */

    /* Start the processing */
    mytime = time(NULL);
    printf ("Start TOA reflectance corrections: %s", ctime(&mytime));

    /* Allocate memory for band data.  Cast before multiplying so the pixel
       count is computed in long arithmetic; nlines * nsamps can overflow a
       32-bit int for large scenes. */
    npixels = (long) nlines * nsamps;
    uband = calloc (npixels, sizeof (uint16));
    if (uband == NULL)
    {
        sprintf (errmsg, "Error allocating memory for uband");
        error_handler (true, FUNC_NAME, errmsg);
        return (ERROR);
    }

    /* Loop through all the bands (except the pan band) and compute the TOA
       reflectance and TOA brightness temp */
    for (ib = DNL_BAND1; ib <= DNL_BAND11; ib++)
    {
        /* Don't process the pan band */
        if (ib == DNL_BAND8)
            continue;
        printf ("%d ... ", ib+1);

        /* Read the current band and calibrate bands 1-9 (except pan) to
           obtain TOA reflectance. Bands are corrected for the sun angle. */
        if (ib <= DNL_BAND9)
        {
            if (ib <= DNL_BAND7)
            {
                iband = ib;
                sband_ib = ib;
            }
            else
            {   /* don't count the pan band */
                iband = ib - 1;
                sband_ib = ib - 1;
            }

            if (get_input_refl_lines (input, iband, 0, nlines, nsamps, uband)
                != SUCCESS)
            {
                sprintf (errmsg, "Error reading Landsat band %d", ib+1);
                error_handler (true, FUNC_NAME, errmsg);
                free (uband);   /* don't leak the band buffer on error */
                return (ERROR);
            }

            /* Get TOA reflectance coefficients for this reflectance band from
               the XML file */
            refl_mult = input->meta.gain[iband];
            refl_add = input->meta.bias[iband];
            sza_mult = input->meta.gain_sza;
            sza_add = input->meta.bias_sza;

#ifdef _OPENMP
            #pragma omp parallel for private (i, xmus, rotoa)
#endif
            for (i = 0; i < npixels; i++)
            {
                /* If this pixel is fill, continue with the next pixel. */
                if (level1_qa_is_fill(qaband[i]))
                {
                    sband[sband_ib][i] = FILL_VALUE;
                    continue;
                }

                /* Compute the TOA reflectance based on the per-pixel
                   sun angle (need to unscale the DN value).  NOTE(review):
                   xmus approaches 0 for solar zenith angles near 90 degrees;
                   presumably such pixels are flagged upstream -- confirm. */
                xmus = cos((sza[i] * sza_mult + sza_add) * DEG2RAD);
                rotoa = (uband[i] * refl_mult) + refl_add;
                rotoa /= xmus;

                /* Save the TOA reflectance value, but make sure it falls
                   within the defined valid range since it will get used for
                   SR computations */
                if (rotoa < MIN_VALID_REFL)
                    sband[sband_ib][i] = MIN_VALID_REFL;
                else if (rotoa > MAX_VALID_REFL)
                    sband[sband_ib][i] = MAX_VALID_REFL;
                else
                    sband[sband_ib][i] = rotoa;
            }  /* for i */
        }  /* end if band <= band 9 */

        /* Read the current band and calibrate thermal bands. Not available
           for OLI-only scenes. */
        else if ((ib == DNL_BAND10 || ib == DNL_BAND11) &&
            strcmp (instrument, "OLI"))
        {
            /* Handle index differences between bands */
            if (ib == DNL_BAND10)
            {
                th_indx = 0;
                sband_ib = SRL_BAND10;
            }
            else
            {   /* if (ib == DNL_BAND11) */
                th_indx = 1;
                sband_ib = SRL_BAND11;
            }

            /* Read the input thermal lines */
            if (get_input_th_lines (input, th_indx, 0, nlines, uband)
                != SUCCESS)
            {
                sprintf (errmsg, "Error reading Landsat band %d", ib+1);
                error_handler (true, FUNC_NAME, errmsg);
                free (uband);   /* don't leak the band buffer on error */
                return (ERROR);
            }

            /* Get brightness temp coefficients for this band from XML file */
            xcals = input->meta.gain_th[th_indx];
            xcalo = input->meta.bias_th[th_indx];
            k1 = input->meta.k1_const[th_indx];
            k2 = input->meta.k2_const[th_indx];

            /* Compute the brightness temp for this thermal band. Make sure
               it falls within the min/max range for the thermal bands. */
#ifdef _OPENMP
            #pragma omp parallel for private (i, tmpf)
#endif
            for (i = 0; i < npixels; i++)
            {
                /* If this pixel is fill, continue with the next pixel. */
                if (level1_qa_is_fill(qaband[i]))
                {
                    sband[sband_ib][i] = FILL_VALUE;
                    continue;
                }

                /* Compute the TOA spectral radiance */
                tmpf = xcals * uband[i] + xcalo;

                /* Compute TOA brightness temp (K) via the inverse Planck
                   relation using the K1/K2 calibration constants */
                tmpf = k2 / log (k1 / tmpf + 1.0);

                /* Make sure the brightness temp falls within the specified
                   range, since it will get used for the SR computations */
                if (tmpf < MIN_VALID_TH)
                    sband[sband_ib][i] = MIN_VALID_TH;
                else if (tmpf > MAX_VALID_TH)
                    sband[sband_ib][i] = MAX_VALID_TH;
                else
                    sband[sband_ib][i] = tmpf;
            }  /* for i */
        }  /* end if band 10 || band 11 */
    }  /* end for ib */
    printf ("\n");

    /* The input data has been read and calibrated. The memory can be freed. */
    free (uband);

    /* Successful completion */
    mytime = time(NULL);
    printf ("End of TOA reflectance computations: %s", ctime(&mytime));
    return (SUCCESS);
}
/******************************************************************************
MODULE: compute_landsat_sr_refl
PURPOSE: Computes the surface reflectance for all the Landsat reflectance
bands.
RETURN VALUE:
Type = int
Value Description
----- -----------
ERROR Error computing the reflectance
SUCCESS No errors encountered
PROJECT: Land Satellites Data System Science Research and Development (LSRD)
at the USGS EROS
NOTES:
1. Initializes the variables and data arrays from the lookup table and
auxiliary files.
2. The tauray array was originally read in from a static ASCII file, but it is
now hardcoded to save time from reading the file each time. This file was
generated (like many of the other auxiliary input tables) by running 6S and
storing the coefficients.
3. Aerosols are retrieved for all non-fill pixels. If the aerosol fails the
model residual or NDVI test, then the pixel is flagged as water. All water
pixels are run through a water-specific aerosol retrieval. If the model
residual fails, then that pixel is marked as failed aerosol retrieval. Any
pixel that failed retrieval is then interpolated using an average of the
clear (valid land pixel aerosols) and water (valid water pixel aerosols).
Those final aerosol values are used for the surface reflectance corrections.
4. Cloud-based QA information is not processed in this algorithm.
******************************************************************************/
int compute_landsat_sr_refl
(
Input_t *input, /* I: input structure for the Landsat product */
Espa_internal_meta_t *xml_metadata,
/* I: XML metadata structure */
char *xml_infile, /* I: input XML filename */
uint16 *qaband, /* I: QA band for the input image, nlines x nsamps */
uint16 *out_band, /* I: allocated array for writing scaled output */
int nlines, /* I: number of lines in reflectance, thermal bands */
int nsamps, /* I: number of samps in reflectance, thermal bands */
float pixsize, /* I: pixel size for the reflectance bands */
float **sband, /* I/O: input TOA (unscaled) and output surface
reflectance (unscaled) */
int16 *sza, /* I: scaled per-pixel solar zenith angles (degrees),
nlines x nsamps */
int16 *saa, /* I: scaled per-pixel solar azimuth angles (degrees),
nlines x nsamps */
int16 *vza, /* I: scaled per-pixel view zenith angles (degrees),
nlines x nsamps */
int16 *vaa, /* I: scaled per-pixel view azimuth angles (degrees),
nlines x nsamps */
float xts_center, /* I: scene center solar zenith angle (deg) */
float xmus_center, /* I: cosine of scene center solar zenith angle */
bool use_orig_aero, /* I: use the original aerosol handling if specified,
o/w use the semi-empirical approach */
char *anglehdf, /* I: angle HDF filename */
char *intrefnm, /* I: intrinsic reflectance filename */
char *transmnm, /* I: transmission filename */
char *spheranm, /* I: spherical albedo filename */
char *cmgdemnm, /* I: climate modeling grid (CMG) DEM filename */
char *rationm, /* I: ratio averages filename */
char *auxnm /* I: auxiliary filename for ozone and water vapor */
)
{
char errmsg[STR_SIZE]; /* error message */
char FUNC_NAME[] = "compute_landsat_sr_refl"; /* function name */
Sat_t sat = input->meta.sat; /* satellite */
int retval; /* return status */
int i, j; /* looping variable for pixels */
int ib; /* looping variable for input bands */
int iband; /* current band */
int curr_pix = -99; /* current pixel in 1D arrays of nlines * nsamps */
int center_pix; /* current pixel in 1D arrays of nlines * nsamps for
the center of the aerosol window */
int center_line; /* line for the center of the aerosol window */
int center_samp; /* sample for the center of the aerosol window */
int nearest_line; /* line for nearest non-fill/cloud pixel in the
aerosol window */
int nearest_samp; /* samp for nearest non-fill/cloud pixel in the
aerosol window */
long npixels; /* number of pixels to process */
float tmpf; /* temporary floating point value */
float rotoa; /* top of atmosphere reflectance */
float roslamb; /* lambertian surface reflectance */
float tgo; /* other gaseous transmittance (tgog * tgoz) */
float roatm; /* intrinsic atmospheric reflectance */
float ttatmg; /* total atmospheric transmission */
float satm; /* atmosphere spherical albedo */
float tgo_x_roatm; /* variable for tgo * roatm */
float tgo_x_ttatmg; /* variable for tgo * ttatmg */
float xrorayp; /* reflectance of the atmosphere due to molecular
(Rayleigh) scattering */
float erelc[NSR_BANDS]; /* band ratio variable for refl bands */
float troatm[NSR_BANDS]; /* atmospheric reflectance table for refl bands */
float btgo[NSR_BANDS]; /* other gaseous transmittance for refl bands */
float broatm[NSR_BANDS]; /* atmospheric reflectance for refl bands */
float bttatmg[NSR_BANDS]; /* ttatmg for refl bands */
float bsatm[NSR_BANDS]; /* atmosphere spherical albedo for refl bands */
int iband1; /* band index (zero-based) */
float raot; /* AOT reflectance */
float sraot1, sraot3;
/* raot values for three different eps values */
float residual; /* model residual */
float residual1, residual2, residual3;
/* residuals for 3 different eps values */
float rsurf; /* surface reflectance */
float corf; /* aerosol impact (higher values represent high
aerosol) */
float ros1, ros4, ros5; /* surface reflectance for bands 1, 4, and 5 */
#ifndef _OPENMP
int tmp_percent; /* current percentage for printing status */
int curr_tmp_percent; /* percentage for current line */
#endif
float lat, lon; /* pixel lat, long location */
int lcmg, scmg; /* line/sample index for the CMG */
int lcmg1, scmg1; /* line+1/sample+1 index for the CMG */
float u, v; /* line/sample index for the CMG */
float one_minus_u; /* 1.0 - u */
float one_minus_v; /* 1.0 - v */
float one_minus_u_x_one_minus_v; /* (1.0 - u) * (1.0 - v) */
float one_minus_u_x_v; /* (1.0 - u) * v */
float u_x_one_minus_v; /* u * (1.0 - v) */
float u_x_v; /* u * v */
float ndwi_th1, ndwi_th2; /* values for NDWI calculations */
float xcmg, ycmg; /* x/y location for CMG */
float xndwi; /* calculated NDWI value */
int uoz11, uoz21, uoz12, uoz22; /* ozone at line,samp; line, samp+1;
line+1, samp; and line+1, samp+1 */
float pres11, pres12, pres21, pres22; /* pressure at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
float wv11, wv12, wv21, wv22; /* water vapor at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
uint8 *ipflag = NULL; /* QA flag to assist with aerosol interpolation,
nlines x nsamps */
float *twvi = NULL; /* interpolated water vapor value,
nlines x nsamps */
float *tozi = NULL; /* interpolated ozone value, nlines x nsamps */
float *tp = NULL; /* interpolated pressure value, nlines x nsamps */
float *taero = NULL; /* aerosol values for each pixel, nlines x nsamps */
float *teps = NULL; /* angstrom coeff for each pixel, nlines x nsamps */
float *aerob1 = NULL; /* atmospherically corrected band 1 data
(unscaled TOA refl), nlines x nsamps */
float *aerob2 = NULL; /* atmospherically corrected band 2 data
(unscaled TOA refl), nlines x nsamps */
float *aerob4 = NULL; /* atmospherically corrected band 4 data
(unscaled TOA refl), nlines x nsamps */
float *aerob5 = NULL; /* atmospherically corrected band 5 data
(unscaled TOA refl), nlines x nsamps */
float *aerob7 = NULL; /* atmospherically corrected band 7 data
(unscaled TOA refl), nlines x nsamps */
/* Vars for forward/inverse mapping space */
Geoloc_t *space = NULL; /* structure for geolocation information */
Space_def_t space_def; /* structure to define the space mapping */
Img_coord_float_t img; /* coordinate in line/sample space */
Geo_coord_t geo; /* coordinate in lat/long space */
/* Lookup table variables */
float eps; /* angstrom coefficient */
float eps1, eps2, eps3; /* eps values for three runs */
float xts; /* solar zenith angle (deg) */
float xmus; /* cosine of solar zenith angle */
float xtv_center; /* scene center observation zenith angle (deg) */
float xmuv_center; /* cosine of scene center observation zenith angle */
float xtv; /* observation zenith angle (deg) */
float xmuv; /* cosine of observation zenith angle */
float xfi_center; /* azimuthal difference between the sun and
observation angle at scene center (deg) */
float cosxfi_center; /* cosine of azimuthal difference at scene center */
float xfi; /* azimuthal difference between the sun and
observation angle (deg) */
float cosxfi; /* cosine of azimuthal difference */
float xtsstep; /* solar zenith step value */
float xtsmin; /* minimum solar zenith value */
float xtvstep; /* observation step value */
float xtvmin; /* minimum observation value */
float *rolutt = NULL; /* intrinsic reflectance table
[NSR_BANDS x NPRES_VALS x NAOT_VALS x NSOLAR_VALS] */
float *transt = NULL; /* transmission table
[NSR_BANDS x NPRES_VALS x NAOT_VALS x NSUNANGLE_VALS] */
float *sphalbt = NULL; /* spherical albedo table
[NSR_BANDS x NPRES_VALS x NAOT_VALS] */
float *normext = NULL; /* aerosol extinction coefficient at the current
wavelength (normalized at 550nm)
[NSR_BANDS x NPRES_VALS x NAOT_VALS] */
float *tsmax = NULL; /* maximum scattering angle table
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *tsmin = NULL; /* minimum scattering angle table
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *nbfi = NULL; /* number of azimuth angles
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *nbfic = NULL; /* cumulative number of azimuth angles
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *ttv = NULL; /* view angle table
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float tts[22]; /* sun angle table */
int32 indts[22]; /* index for sun angle table */
int iaots; /* index for AOTs */
/* Atmospheric correction coefficient variables (semi-empirical approach) */
float tgo_arr[NREFL_BANDS]; /* per-band other gaseous transmittance */
float roatm_arr[NREFL_BANDS][NAOT_VALS]; /* per band AOT vals for roatm */
float ttatmg_arr[NREFL_BANDS][NAOT_VALS]; /* per band AOT vals for ttatmg */
float satm_arr[NREFL_BANDS][NAOT_VALS]; /* per band AOT vals for satm */
float roatm_coef[NREFL_BANDS][NCOEF]; /* per band poly coeffs for roatm */
float ttatmg_coef[NREFL_BANDS][NCOEF]; /* per band poly coeffs for ttatmg */
float satm_coef[NREFL_BANDS][NCOEF]; /* per band poly coeffs for satm */
float normext_p0a3_arr[NREFL_BANDS]; /* per band normext[iband][0][3] */
int roatm_iaMax[NREFL_BANDS]; /* ??? */
int ia; /* looping variable for AOTs */
int iaMaxTemp; /* max temp for current AOT level */
/* Auxiliary file variables */
int16 *dem = NULL; /* CMG DEM data array [DEM_NBLAT x DEM_NBLON] */
int16 *andwi = NULL; /* avg NDWI [RATIO_NBLAT x RATIO_NBLON] */
int16 *sndwi = NULL; /* standard NDWI [RATIO_NBLAT x RATIO_NBLON] */
int16 *ratiob1 = NULL; /* mean band1 ratio [RATIO_NBLAT x RATIO_NBLON] */
int16 *ratiob2 = NULL; /* mean band2 ratio [RATIO_NBLAT x RATIO_NBLON] */
int16 *ratiob7 = NULL; /* mean band7 ratio [RATIO_NBLAT x RATIO_NBLON] */
int16 *intratiob1 = NULL; /* intercept band1 ratio,
RATIO_NBLAT x RATIO_NBLON */
int16 *intratiob2 = NULL; /* intercept band2 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *intratiob7 = NULL; /* intercept band7 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *slpratiob1 = NULL; /* slope band1 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *slpratiob2 = NULL; /* slope band2 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *slpratiob7 = NULL; /* slope band7 ratio
RATIO_NBLAT x RATIO_NBLON */
uint16 *wv = NULL; /* water vapor values [CMG_NBLAT x CMG_NBLON] */
uint8 *oz = NULL; /* ozone values [CMG_NBLAT x CMG_NBLON] */
float raot550nm; /* nearest input value of AOT */
float uoz; /* total column ozone */
float uwv; /* total column water vapor (precipitable water vapor) */
float pres; /* surface pressure */
float rb1; /* band ratio 1 (unscaled) */
float rb2; /* band ratio 2 (unscaled) */
float slpr11, slpr12, slpr21, slpr22; /* band ratio slope at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
float intr11, intr12, intr21, intr22; /* band ratio intercept at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
float slprb1, slprb2, slprb7; /* interpolated band ratio slope values for
band ratios 1, 2, 7 */
float intrb1, intrb2, intrb7; /* interpolated band ratio intercept values
for band ratios 1, 2, 7 */
int ratio_pix11; /* pixel location for ratio products [lcmg][scmg] */
int ratio_pix12; /* pixel location for ratio products [lcmg][scmg+1] */
int ratio_pix21; /* pixel location for ratio products [lcmg+1][scmg] */
int ratio_pix22; /* pixel location for ratio products [lcmg+1][scmg+1] */
int cmg_pix11; /* pixel location for CMG/DEM products [lcmg][scmg] */
int cmg_pix12; /* pixel location for CMG/DEM products [lcmg][scmg+1] */
int cmg_pix21; /* pixel location for CMG/DEM products [lcmg+1][scmg] */
int cmg_pix22; /* pixel location for CMG/DEM products [lcmg+1][scmg+1] */
/* Variables for finding the eps that minimizes the residual */
double xa, xb, xc, xd, xe, xf; /* coefficients */
double coefa, coefb; /* coefficients */
float epsmin; /* eps which minimizes the residual */
/* Output file info */
time_t mytime; /* timing variable */
Output_t *sr_output = NULL; /* output structure and metadata for the SR
product */
Envi_header_t envi_hdr; /* output ENVI header information */
char envi_file[STR_SIZE]; /* ENVI filename */
char *cptr = NULL; /* pointer to the file extension */
/* Table constants */
float aot550nm[NAOT_VALS] = /* AOT look-up table */
{0.01, 0.05, 0.10, 0.15, 0.20, 0.30, 0.40, 0.60, 0.80, 1.00, 1.20,
1.40, 1.60, 1.80, 2.00, 2.30, 2.60, 3.00, 3.50, 4.00, 4.50, 5.00};
float tpres[NPRES_VALS] = /* surface pressure table */
{1050.0, 1013.0, 900.0, 800.0, 700.0, 600.0, 500.0};
/* Atmospheric correction variables */
/* Look up table for atmospheric and geometric quantities. Tauray comes
from tauray-ldcm/msi.ASC and the oz, wv, og variables come from
gascoef-modis/msi.ASC. */
float tauray[NSRL_BANDS] = /* molecular optical thickness coefficients --
produced by running 6S */
{0.23638, 0.16933, 0.09070, 0.04827, 0.01563, 0.00129, 0.00037,
0.07984};
double oztransa[NSRL_BANDS] = /* ozone transmission coeff */
{-0.00255649, -0.0177861, -0.0969872, -0.0611428, 0.0001, 0.0001,
0.0001, -0.0834061};
double wvtransa[NSRL_BANDS] = /* water vapor transmission coeff */
{2.29849e-27, 2.29849e-27, 0.00194772, 0.00404159, 0.000729136,
0.00067324, 0.0177533, 0.00279738};
double wvtransb[NSRL_BANDS] = /* water vapor transmission coeff */
{0.999742, 0.999742, 0.775024, 0.774482, 0.893085, 0.939669, 0.65094,
0.759952};
double ogtransa1[NSRL_BANDS] = /* other gases transmission coeff */
{4.91586e-20, 4.91586e-20, 4.91586e-20, 1.04801e-05, 1.35216e-05,
0.0205425, 0.0256526, 0.000214329};
double ogtransb0[NSRL_BANDS] = /* other gases transmission coeff */
{0.000197019, 0.000197019, 0.000197019, 0.640215, -0.195998, 0.326577,
0.243961, 0.396322};
double ogtransb1[NSRL_BANDS] = /* other gases transmission coeff */
{9.57011e-16, 9.57011e-16, 9.57011e-16, -0.348785, 0.275239, 0.0117192,
0.0616101, 0.04728};
#ifdef WRITE_TAERO
FILE *aero_fptr=NULL; /* file pointer for aerosol files */
#endif
/* Start processing */
mytime = time(NULL);
printf ("Start surface reflectance corrections: %s", ctime(&mytime));
/* Allocate memory for the many arrays needed to do the surface reflectance
computations */
npixels = nlines * nsamps;
retval = landsat_memory_allocation_sr (nlines, nsamps, &aerob1, &aerob2,
&aerob4, &aerob5, &aerob7, &ipflag, &twvi, &tozi, &tp, &taero, &teps,
&dem, &andwi, &sndwi, &ratiob1, &ratiob2, &ratiob7, &intratiob1,
&intratiob2, &intratiob7, &slpratiob1, &slpratiob2, &slpratiob7, &wv,
&oz, &rolutt, &transt, &sphalbt, &normext, &tsmax, &tsmin, &nbfic,
&nbfi, &ttv);
if (retval != SUCCESS)
{
sprintf (errmsg, "Error allocating memory for the data arrays needed "
"for surface reflectance calculations.");
error_handler (false, FUNC_NAME, errmsg);
return (ERROR);
}
/* Initialize the geolocation space applications */
if (!get_geoloc_info (xml_metadata, &space_def))
{
sprintf (errmsg, "Getting the space definition from the XML file");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
space = setup_mapping (&space_def);
if (space == NULL)
{
sprintf (errmsg, "Setting up the geolocation mapping");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Initialize the look up tables and atmospheric correction variables.
view zenith initialized to 0.0 (xtv)
azimuthal difference between sun and obs angle initialize to 0.0 (xfi)
surface pressure is initialized to the pressure at the center of the
scene (using the DEM) (pres)
water vapor is initialized to the value at the center of the scene (uwv)
ozone is initialized to the value at the center of the scene (uoz) */
retval = init_sr_refl (nlines, nsamps, input, &space_def, space, anglehdf,
intrefnm, transmnm, spheranm, cmgdemnm, rationm, auxnm, &eps, &iaots,
&xtv_center, &xmuv_center, &xfi_center, &cosxfi_center, &raot550nm,
&pres, &uoz, &uwv, &xtsstep, &xtsmin, &xtvstep, &xtvmin, tsmax, tsmin,
tts, ttv, indts, rolutt, transt, sphalbt, normext, nbfic, nbfi, dem,
andwi, sndwi, ratiob1, ratiob2, ratiob7, intratiob1, intratiob2,
intratiob7, slpratiob1, slpratiob2, slpratiob7, wv, oz);
if (retval != SUCCESS)
{
sprintf (errmsg, "Error initializing the lookup tables and "
"atmospheric correction variables.");
error_handler (false, FUNC_NAME, errmsg);
return (ERROR);
}
/* Loop through all the reflectance bands and perform atmospheric
corrections based on climatology */
mytime = time(NULL);
printf ("Performing atmospheric corrections for each Landsat reflectance "
"band ... %s", ctime(&mytime));
/* rotoa is not defined for the atmcorlamb2 call, which is ok, but the
roslamb value is not valid upon output. Just set it to 0.0 to be
consistent. */
rotoa = 0.0;
raot550nm = aot550nm[1];
eps = 2.5;
for (ib = 0; ib <= SRL_BAND7; ib++)
{
/* Get the parameters for the atmospheric correction */
retval = atmcorlamb2 (input->meta.sat, xts_center, xtv_center,
xmus_center, xmuv_center, xfi_center, cosxfi_center, raot550nm, ib,
pres, tpres, aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, uoz, uwv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, rotoa, &roslamb, &tgo, &roatm, &ttatmg, &satm,
&xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian atmospheric correction "
"type 2.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Save these band-related parameters for later */
btgo[ib] = tgo;
broatm[ib] = roatm;
bttatmg[ib] = ttatmg;
bsatm[ib] = satm;
tgo_x_roatm = tgo * roatm;
tgo_x_ttatmg = tgo * ttatmg;
/* Perform atmospheric corrections for bands 1-7 */
#ifdef _OPENMP
#pragma omp parallel for private (i, roslamb)
#endif
for (i = 0; i < npixels; i++)
{
/* Skip fill pixels, which have already been marked in the
TOA calculations. */
if (level1_qa_is_fill(qaband[i]))
{
if (ib == DNL_BAND1)
/* Initialize the fill flag, only need to do for band 1 */
ipflag[i] = (1 << IPFLAG_FILL);
continue;
}
/* Store the unscaled TOA reflectance values for later use before
completing atmospheric corrections */
if (ib == DNL_BAND1)
aerob1[i] = sband[ib][i];
else if (ib == DNL_BAND2)
aerob2[i] = sband[ib][i];
else if (ib == DNL_BAND4)
aerob4[i] = sband[ib][i];
else if (ib == DNL_BAND5)
aerob5[i] = sband[ib][i];
else if (ib == DNL_BAND7)
aerob7[i] = sband[ib][i];
/* Apply the atmospheric corrections (ignoring the Rayleigh
scattering component and water vapor), and store the
unscaled value for further corrections. (NOTE: the full
computations are in atmcorlamb2) */
roslamb = sband[ib][i] - tgo_x_roatm;
roslamb /= tgo_x_ttatmg + satm * roslamb;
/* Save the unscaled surface reflectance value */
if (roslamb < MIN_VALID_REFL)
sband[ib][i] = MIN_VALID_REFL;
else if (roslamb > MAX_VALID_REFL)
sband[ib][i] = MAX_VALID_REFL;
else
sband[ib][i] = roslamb;
} /* end for i */
} /* for ib */
/* Start the retrieval of atmospheric correction parameters for each band */
mytime = time(NULL);
printf ("Starting retrieval of atmospheric correction parameters ... %s",
ctime(&mytime));
/* Get the coefficients for the semi-empirical atmospheric correction */
if (!use_orig_aero)
{
mytime = time(NULL);
printf ("Obtaining the coefficients for the semi-empirical approach "
"... %s", ctime(&mytime));
for (ib = 0; ib <= SRL_BAND7; ib++)
{
normext_p0a3_arr[ib] = normext[ib * NPRES_VALS * NAOT_VALS + 0 + 3];
/* normext[ib][0][3]; */
rotoa = 0.0;
eps = 2.5;
for (ia = 0; ia < NAOT_VALS; ia++)
{
raot550nm = aot550nm[ia];
retval = atmcorlamb2 (input->meta.sat, xts_center, xtv_center,
xmus_center, xmuv_center, xfi_center, cosxfi_center,
raot550nm, ib, pres, tpres, aot550nm, rolutt, transt,
xtsstep, xtsmin, xtvstep, xtvmin, sphalbt, normext, tsmax,
tsmin, nbfic, nbfi, tts, indts, ttv, uoz, uwv, tauray,
ogtransa1, ogtransb0, ogtransb1, wvtransa, wvtransb,
oztransa, rotoa, &roslamb, &tgo, &roatm, &ttatmg, &satm,
&xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian atmospheric "
"correction type 2 for band %d.", ib);
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
/* Store the AOT-related variables for use in the atmospheric
corrections */
roatm_arr[ib][ia] = roatm;
ttatmg_arr[ib][ia] = ttatmg;
satm_arr[ib][ia] = satm;
}
/* Store the band-related variables for use in the atmospheric
corrections. tgo and xrorayp are the same for each AOT, so just
save the last set for this band. */
tgo_arr[ib] = tgo;
}
/* Setup the 3rd order polynomial coefficients for the semi-empirical
approach in the aerosol inversion */
for (ib = 0; ib <= SRL_BAND7; ib++)
{
/* Determine the maximum AOT index */
iaMaxTemp = 1;
for (ia = 1; ia < NAOT_VALS; ia++)
{
if (ia == NAOT_VALS-1)
iaMaxTemp = NAOT_VALS-1;
if ((roatm_arr[ib][ia] - roatm_arr[ib][ia-1]) > ESPA_EPSILON)
continue;
else
{
iaMaxTemp = ia-1;
break;
}
}
/* Get the polynomial coefficients for roatm */
roatm_iaMax[ib] = iaMaxTemp;
get_3rd_order_poly_coeff (aot550nm, roatm_arr[ib], iaMaxTemp,
roatm_coef[ib]);
/* Get the polynomial coefficients for ttatmg */
get_3rd_order_poly_coeff (aot550nm, ttatmg_arr[ib], NAOT_VALS,
ttatmg_coef[ib]);
/* Get the polynomial coefficients for satm */
get_3rd_order_poly_coeff (aot550nm, satm_arr[ib], NAOT_VALS,
satm_coef[ib]);
}
} /* if !use_orig_aero */
/* If using the original aerosol approach we need some auxiliary data to
be interpolated for every pixel so it's available for the final aerosol
correction */
if (use_orig_aero)
{
mytime = time(NULL);
printf ("Interpolating the auxiliary data ... %s", ctime(&mytime));
#ifdef _OPENMP
#pragma omp parallel for private (i, j, curr_pix, img, geo, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, cmg_pix11, cmg_pix12, cmg_pix21, cmg_pix22, wv11, wv12, wv21, wv22, uoz11, uoz12, uoz21, uoz22, pres11, pres12, pres21, pres22)
#endif
for (i = 0; i < nlines; i++)
{
curr_pix = i * nsamps;
for (j = 0; j < nsamps; j++, curr_pix++)
{
/* If this pixel is fill, do not process */
if (qaband[curr_pix] == 1)
{
ipflag[curr_pix] |= (1 << IPFLAG_FILL);
continue;
}
/* Get the lat/long for the current pixel */
img.l = i - 0.5;
img.s = j + 0.5;
img.is_fill = false;
if (!from_space (space, &img, &geo))
{
sprintf (errmsg, "Mapping line/sample (%d, %d) to "
"geolocation coords", i, j);
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
lat = geo.lat * RAD2DEG;
lon = geo.lon * RAD2DEG;
/* Use that lat/long to determine the line/sample in the
CMG-related lookup tables, using the center of the UL
pixel. Note, we are basically making sure the line/sample
combination falls within -90, 90 and -180, 180 global climate
data boundaries. However, the source code below uses lcmg+1
and scmg+1, which for some scenes may wrap around the
dateline or the poles. Thus we need to wrap the CMG data
around to the beginning of the array. */
/* Each CMG pixel is 0.05 x 0.05 degrees. Use the center of the
pixel for each calculation. Negative latitude values should
be the largest line values in the CMG grid. Negative
longitude values should be the smallest sample values in the
CMG grid. */
/* The line/sample calculation from the x/ycmg values are not
rounded. The interpolation of the value using line+1 and
sample+1 are based on the truncated numbers, therefore
rounding up is not appropriate. */
ycmg = (89.975 - lat) * 20.0; /* vs / 0.05 */
xcmg = (179.975 + lon) * 20.0; /* vs / 0.05 */
lcmg = (int) ycmg;
scmg = (int) xcmg;
/* Handle the edges of the lat/long values in the CMG grid */
if (lcmg < 0)
lcmg = 0;
else if (lcmg >= CMG_NBLAT)
lcmg = CMG_NBLAT;
if (scmg < 0)
scmg = 0;
else if (scmg >= CMG_NBLON)
scmg = CMG_NBLON;
/* If the current CMG pixel is at the edge of the CMG array,
then allow the next pixel for interpolation to wrap around
the array */
if (scmg >= CMG_NBLON-1) /* 180 degrees so wrap around */
scmg1 = 0;
else
scmg1 = scmg + 1;
if (lcmg >= CMG_NBLAT-1) /* -90 degrees so wrap around */
lcmg1 = 0;
else
lcmg1 = lcmg + 1;
/* Determine the four CMG pixels to be used for the current
Landsat pixel */
cmg_pix11 = lcmg * CMG_NBLON + scmg;
cmg_pix12 = lcmg * CMG_NBLON + scmg1;
cmg_pix21 = lcmg1 * CMG_NBLON + scmg;
cmg_pix22 = lcmg1 * CMG_NBLON + scmg1;
/* Get the water vapor pixels. If the water vapor value is
fill (=0), then use it as-is. */
wv11 = wv[cmg_pix11];
wv12 = wv[cmg_pix12];
wv21 = wv[cmg_pix21];
wv22 = wv[cmg_pix22];
/* Get the ozone pixels. If the ozone value is fill (=0), then
use a default value of 120. */
uoz11 = oz[cmg_pix11];
if (uoz11 == 0)
uoz11 = 120;
uoz12 = oz[cmg_pix12];
if (uoz12 == 0)
uoz12 = 120;
uoz21 = oz[cmg_pix21];
if (uoz21 == 0)
uoz21 = 120;
uoz22 = oz[cmg_pix22];
if (uoz22 == 0)
uoz22 = 120;
/* Get the surface pressure from the global DEM. Set to 1013.0
(sea level) if the DEM is fill (= -9999), which is likely
ocean. The dimensions on the DEM array is the same as that
of the CMG arrays. Use the current pixel locations already
calculated. */
if (dem[cmg_pix11] != -9999)
pres11 = 1013.0 * exp (-dem[cmg_pix11] * ONE_DIV_8500);
else
pres11 = 1013.0;
if (dem[cmg_pix12] != -9999)
pres12 = 1013.0 * exp (-dem[cmg_pix12] * ONE_DIV_8500);
else
pres12 = 1013.0;
if (dem[cmg_pix21] != -9999)
pres21 = 1013.0 * exp (-dem[cmg_pix21] * ONE_DIV_8500);
else
pres21 = 1013.0;
if (dem[cmg_pix22] != -9999)
pres22 = 1013.0 * exp (-dem[cmg_pix22] * ONE_DIV_8500);
else
pres22 = 1013.0;
/* Determine the fractional difference between the integer
location and floating point pixel location to be used for
interpolation */
u = (ycmg - lcmg);
v = (xcmg - scmg);
one_minus_u = 1.0 - u;
one_minus_v = 1.0 - v;
one_minus_u_x_one_minus_v = one_minus_u * one_minus_v;
one_minus_u_x_v = one_minus_u * v;
u_x_one_minus_v = u * one_minus_v;
u_x_v = u * v;
/* Interpolate water vapor, and unscale */
twvi[curr_pix] = wv11 * one_minus_u_x_one_minus_v +
wv12 * one_minus_u_x_v +
wv21 * u_x_one_minus_v +
wv22 * u_x_v;
twvi[curr_pix] = twvi[curr_pix] * 0.01; /* vs / 100 */
/* Interpolate ozone, and unscale */
tozi[curr_pix] = uoz11 * one_minus_u_x_one_minus_v +
uoz12 * one_minus_u_x_v +
uoz21 * u_x_one_minus_v +
uoz22 * u_x_v;
tozi[curr_pix] = tozi[curr_pix] * 0.0025; /* vs / 400 */
/* Interpolate surface pressure */
tp[curr_pix] = pres11 * one_minus_u_x_one_minus_v +
pres12 * one_minus_u_x_v +
pres21 * u_x_one_minus_v +
pres22 * u_x_v;
} /* end for j */
} /* end for i */
} /* if use_orig_aero */
/* Initialize and compute some EPS values */
eps1 = LOW_EPS;
eps2 = MOD_EPS;
eps3 = HIGH_EPS;
xa = (eps1 * eps1) - (eps3 * eps3);
xd = (int) ((eps2 * eps2) - (eps3 * eps3));
xb = eps1 - eps3;
xe = eps2 - eps3;
/* Start the aerosol inversion */
mytime = time(NULL);
printf ("Aerosol Inversion using %d x %d aerosol window ... %s",
LAERO_WINDOW, LAERO_WINDOW, ctime(&mytime));
#ifdef _OPENMP
#pragma omp parallel for private (i, j, center_line, center_samp, nearest_line, nearest_samp, curr_pix, center_pix, img, geo, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, ratio_pix11, ratio_pix12, ratio_pix21, ratio_pix22, rb1, rb2, slpr11, slpr12, slpr21, slpr22, intr11, intr12, intr21, intr22, slprb1, slprb2, slprb7, intrb1, intrb2, intrb7, xndwi, ndwi_th1, ndwi_th2, ib, xtv, xmuv, xts, xmus, xfi, cosxfi, iband, iband1, iaots, pres, uoz, uwv, retval, eps, residual, residual1, residual2, residual3, raot, sraot1, sraot3, xa, xb, xc, xf, epsmin, corf, rotoa, raot550nm, roslamb, tgo, roatm, ttatmg, satm, xrorayp, ros1, ros5, ros4, erelc, troatm)
#else
tmp_percent = 0;
#endif
for (i = LHALF_AERO_WINDOW; i < nlines; i += LAERO_WINDOW)
{
#ifndef _OPENMP
/* update status, but not if multi-threaded */
curr_tmp_percent = 100 * i / nlines;
if (curr_tmp_percent > tmp_percent)
{
tmp_percent = curr_tmp_percent;
if (tmp_percent % 10 == 0)
{
printf ("%d%% ", tmp_percent);
fflush (stdout);
}
}
#endif
curr_pix = i * nsamps + LHALF_AERO_WINDOW;
for (j = LHALF_AERO_WINDOW; j < nsamps;
j += LAERO_WINDOW, curr_pix += LAERO_WINDOW)
{
/* Keep track of the center pixel for the current aerosol window;
may need to return here if this is fill, cloudy or water */
center_line = i;
center_samp = j;
center_pix = curr_pix;
/* If this pixel is fill */
if (level1_qa_is_fill (qaband[curr_pix]))
{
/* Look for other non-fill pixels in the window */
if (find_closest_non_fill (qaband, nlines, nsamps, center_line,
center_samp, LHALF_AERO_WINDOW, &nearest_line,
&nearest_samp))
{
/* Use the line/sample location of the non-fill pixel for
further processing of aerosols. However we will still
write to the center of the aerosol window for the
current window. */
i = nearest_line;
j = nearest_samp;
curr_pix = i * nsamps + j;
}
else
{
/* No other non-fill pixels found. Pixel is already
flagged as fill. Move to next aerosol window. */
continue;
}
}
/* Get the lat/long for the current pixel (which may not be the
center of the aerosol window), for the center of that pixel */
img.l = i - 0.5;
img.s = j + 0.5;
img.is_fill = false;
if (!from_space (space, &img, &geo))
{
sprintf (errmsg, "Mapping line/sample (%d, %d) to "
"geolocation coords", i, j);
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
lat = geo.lat * RAD2DEG;
lon = geo.lon * RAD2DEG;
/* Use that lat/long to determine the line/sample in the
CMG-related lookup tables, using the center of the UL
pixel. Note, we are basically making sure the line/sample
combination falls within -90, 90 and -180, 180 global climate
data boundaries. However, the source code below uses lcmg+1
and scmg+1, which for some scenes may wrap around the
dateline or the poles. Thus we need to wrap the CMG data
around to the beginning of the array. */
/* Each CMG pixel is 0.05 x 0.05 degrees. Use the center of the
pixel for each calculation. Negative latitude values should
be the largest line values in the CMG grid. Negative
longitude values should be the smallest sample values in the
CMG grid. */
/* The line/sample calculation from the x/ycmg values are not
rounded. The interpolation of the value using line+1 and
sample+1 are based on the truncated numbers, therefore
rounding up is not appropriate. */
ycmg = (89.975 - lat) * 20.0; /* vs / 0.05 */
xcmg = (179.975 + lon) * 20.0; /* vs / 0.05 */
lcmg = (int) ycmg;
scmg = (int) xcmg;
/* Handle the edges of the lat/long values in the CMG grid */
if (lcmg < 0)
lcmg = 0;
else if (lcmg >= CMG_NBLAT)
lcmg = CMG_NBLAT - 1;
if (scmg < 0)
scmg = 0;
else if (scmg >= CMG_NBLON)
scmg = CMG_NBLON - 1;
/* If the current CMG pixel is at the edge of the CMG array, then
allow the next pixel for interpolation to wrap around the
array */
if (scmg >= CMG_NBLON-1) /* 180 degrees so wrap around */
scmg1 = 0;
else
scmg1 = scmg + 1;
if (lcmg >= CMG_NBLAT-1) /* -90 degrees, so set the next pixel
to also use -90 */
lcmg1 = lcmg;
else
lcmg1 = lcmg + 1;
/* Determine the fractional difference between the integer location
and floating point pixel location to be used for interpolation */
u = (ycmg - lcmg);
v = (xcmg - scmg);
one_minus_u = 1.0 - u;
one_minus_v = 1.0 - v;
one_minus_u_x_one_minus_v = one_minus_u * one_minus_v;
one_minus_u_x_v = one_minus_u * v;
u_x_one_minus_v = u * one_minus_v;
u_x_v = u * v;
/* Determine the band ratios and slope/intercept */
ratio_pix11 = lcmg * RATIO_NBLON + scmg;
ratio_pix12 = lcmg * RATIO_NBLON + scmg1;
ratio_pix21 = lcmg1 * RATIO_NBLON + scmg;
ratio_pix22 = lcmg1 * RATIO_NBLON + scmg1;
rb1 = ratiob1[ratio_pix11] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix11] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix11] = 0;
slpratiob2[ratio_pix11] = 0;
slpratiob7[ratio_pix11] = 0;
intratiob1[ratio_pix11] = 550;
intratiob2[ratio_pix11] = 600;
intratiob7[ratio_pix11] = 2000;
}
else if (sndwi[ratio_pix11] < 200)
{
slpratiob1[ratio_pix11] = 0;
slpratiob2[ratio_pix11] = 0;
slpratiob7[ratio_pix11] = 0;
intratiob1[ratio_pix11] = ratiob1[ratio_pix11];
intratiob2[ratio_pix11] = ratiob2[ratio_pix11];
intratiob7[ratio_pix11] = ratiob7[ratio_pix11];
}
rb1 = ratiob1[ratio_pix12] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix12] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix12] = 0;
slpratiob2[ratio_pix12] = 0;
slpratiob7[ratio_pix12] = 0;
intratiob1[ratio_pix12] = 550;
intratiob2[ratio_pix12] = 600;
intratiob7[ratio_pix12] = 2000;
}
else if (sndwi[ratio_pix12] < 200)
{
slpratiob1[ratio_pix12] = 0;
slpratiob2[ratio_pix12] = 0;
slpratiob7[ratio_pix12] = 0;
intratiob1[ratio_pix12] = ratiob1[ratio_pix12];
intratiob2[ratio_pix12] = ratiob2[ratio_pix12];
intratiob7[ratio_pix12] = ratiob7[ratio_pix12];
}
rb1 = ratiob1[ratio_pix21] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix21] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix21] = 0;
slpratiob2[ratio_pix21] = 0;
slpratiob7[ratio_pix21] = 0;
intratiob1[ratio_pix21] = 550;
intratiob2[ratio_pix21] = 600;
intratiob7[ratio_pix21] = 2000;
}
else if (sndwi[ratio_pix21] < 200)
{
slpratiob1[ratio_pix21] = 0;
slpratiob2[ratio_pix21] = 0;
slpratiob7[ratio_pix21] = 0;
intratiob1[ratio_pix21] = ratiob1[ratio_pix21];
intratiob2[ratio_pix21] = ratiob2[ratio_pix21];
intratiob7[ratio_pix21] = ratiob7[ratio_pix21];
}
rb1 = ratiob1[ratio_pix22] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix22] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix22] = 0;
slpratiob2[ratio_pix22] = 0;
slpratiob7[ratio_pix22] = 0;
intratiob1[ratio_pix22] = 550;
intratiob2[ratio_pix22] = 600;
intratiob7[ratio_pix22] = 2000;
}
else if (sndwi[ratio_pix22] < 200)
{
slpratiob1[ratio_pix22] = 0;
slpratiob2[ratio_pix22] = 0;
slpratiob7[ratio_pix22] = 0;
intratiob1[ratio_pix22] = ratiob1[ratio_pix22];
intratiob2[ratio_pix22] = ratiob2[ratio_pix22];
intratiob7[ratio_pix22] = ratiob7[ratio_pix22];
}
/* Interpolate the slope/intercept for each band, and unscale */
slpr11 = slpratiob1[ratio_pix11] * 0.001; /* vs / 1000 */
intr11 = intratiob1[ratio_pix11] * 0.001; /* vs / 1000 */
slpr12 = slpratiob1[ratio_pix12] * 0.001; /* vs / 1000 */
intr12 = intratiob1[ratio_pix12] * 0.001; /* vs / 1000 */
slpr21 = slpratiob1[ratio_pix21] * 0.001; /* vs / 1000 */
intr21 = intratiob1[ratio_pix21] * 0.001; /* vs / 1000 */
slpr22 = slpratiob1[ratio_pix22] * 0.001; /* vs / 1000 */
intr22 = intratiob1[ratio_pix22] * 0.001; /* vs / 1000 */
slprb1 = slpr11 * one_minus_u_x_one_minus_v +
slpr12 * one_minus_u_x_v +
slpr21 * u_x_one_minus_v +
slpr22 * u_x_v;
intrb1 = intr11 * one_minus_u_x_one_minus_v +
intr12 * one_minus_u_x_v +
intr21 * u_x_one_minus_v +
intr22 * u_x_v;
slpr11 = slpratiob2[ratio_pix11] * 0.001; /* vs / 1000 */
intr11 = intratiob2[ratio_pix11] * 0.001; /* vs / 1000 */
slpr12 = slpratiob2[ratio_pix12] * 0.001; /* vs / 1000 */
intr12 = intratiob2[ratio_pix12] * 0.001; /* vs / 1000 */
slpr21 = slpratiob2[ratio_pix21] * 0.001; /* vs / 1000 */
intr21 = intratiob2[ratio_pix21] * 0.001; /* vs / 1000 */
slpr22 = slpratiob2[ratio_pix22] * 0.001; /* vs / 1000 */
intr22 = intratiob2[ratio_pix22] * 0.001; /* vs / 1000 */
slprb2 = slpr11 * one_minus_u_x_one_minus_v +
slpr12 * one_minus_u_x_v +
slpr21 * u_x_one_minus_v +
slpr22 * u_x_v;
intrb2 = intr11 * one_minus_u_x_one_minus_v +
intr12 * one_minus_u_x_v +
intr21 * u_x_one_minus_v +
intr22 * u_x_v;
slpr11 = slpratiob7[ratio_pix11] * 0.001; /* vs / 1000 */
intr11 = intratiob7[ratio_pix11] * 0.001; /* vs / 1000 */
slpr12 = slpratiob7[ratio_pix12] * 0.001; /* vs / 1000 */
intr12 = intratiob7[ratio_pix12] * 0.001; /* vs / 1000 */
slpr21 = slpratiob7[ratio_pix21] * 0.001; /* vs / 1000 */
intr21 = intratiob7[ratio_pix21] * 0.001; /* vs / 1000 */
slpr22 = slpratiob7[ratio_pix22] * 0.001; /* vs / 1000 */
intr22 = intratiob7[ratio_pix22] * 0.001; /* vs / 1000 */
slprb7 = slpr11 * one_minus_u_x_one_minus_v +
slpr12 * one_minus_u_x_v +
slpr21 * u_x_one_minus_v +
slpr22 * u_x_v;
intrb7 = intr11 * one_minus_u_x_one_minus_v +
intr12 * one_minus_u_x_v +
intr21 * u_x_one_minus_v +
intr22 * u_x_v;
/* Calculate NDWI variables for the band ratios */
xndwi = ((double) sband[SRL_BAND5][curr_pix] -
(double) (sband[SRL_BAND7][curr_pix] * 0.5)) /
((double) sband[SRL_BAND5][curr_pix] +
(double) (sband[SRL_BAND7][curr_pix] * 0.5));
ndwi_th1 = (andwi[ratio_pix11] + 2.0 *
sndwi[ratio_pix11]) * 0.001;
ndwi_th2 = (andwi[ratio_pix11] - 2.0 *
sndwi[ratio_pix11]) * 0.001;
if (xndwi > ndwi_th1)
xndwi = ndwi_th1;
if (xndwi < ndwi_th2)
xndwi = ndwi_th2;
/* Initialize the band ratios */
for (ib = 0; ib < NSR_BANDS; ib++)
{
erelc[ib] = -1.0;
troatm[ib] = 0.0;
}
/* Compute the band ratio - coastal aerosol, blue, red, SWIR */
erelc[DNL_BAND1] = (xndwi * slprb1 + intrb1);
erelc[DNL_BAND2] = (xndwi * slprb2 + intrb2);
erelc[DNL_BAND4] = 1.0;
erelc[DNL_BAND7] = (xndwi * slprb7 + intrb7);
/* Retrieve the TOA reflectance values for the current pixel */
troatm[DNL_BAND1] = aerob1[curr_pix];
troatm[DNL_BAND2] = aerob2[curr_pix];
troatm[DNL_BAND4] = aerob4[curr_pix];
troatm[DNL_BAND7] = aerob7[curr_pix];
/* Determine the solar and view angles for the current pixel */
if (use_orig_aero)
{
xtv = vza[curr_pix] * 0.01;
xmuv = cos(xtv * DEG2RAD);
xts = sza[curr_pix] * 0.01;
xmus = cos(xts * DEG2RAD);
xfi = saa[curr_pix] * 0.01 - vaa[curr_pix] * 0.01 ;
cosxfi = cos(xfi * DEG2RAD);
}
/* Retrieve the aerosol information for low eps 1.0 */
iband1 = DNL_BAND4; /* red band */
iaots = 0;
if (use_orig_aero)
{
pres = tp[curr_pix];
uoz = tozi[curr_pix];
uwv = twvi[curr_pix];
retval = subaeroret (input->meta.sat, false, iband1, xts, xtv,
xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc, troatm,
tpres, rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin,
sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, &raot, &residual, &iaots, eps1);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps1);
/* Save the data */
residual1 = residual;
sraot1 = raot;
/* Retrieve the aerosol information for moderate eps 1.75 */
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, false, iband1, xts, xtv,
xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc, troatm,
tpres, rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin,
sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, &raot, &residual, &iaots, eps2);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps2);
/* Save the data */
residual2 = residual;
/* Retrieve the aerosol information for high eps 2.5 */
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, false, iband1, xts, xtv,
xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc, troatm,
tpres, rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin,
sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, &raot, &residual, &iaots, eps3);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps3);
/* Save the data */
residual3 = residual;
sraot3 = raot;
/* Find the eps (angstrom coefficient for AOT) that minimizes the
residual */
xc = residual1 - residual3;
xf = residual2 - residual3;
coefa = (xc*xe - xb*xf) / (xa*xe - xb*xd);
coefb = (xa*xf - xc*xd) / (xa*xe - xb*xd);
epsmin = -coefb / (2.0 * coefa);
eps = epsmin;
if (epsmin >= LOW_EPS && epsmin <= HIGH_EPS)
{
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, false, iband1, xts,
xtv, xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc,
troatm, tpres, rolutt, transt, xtsstep, xtsmin,
xtvstep, xtvmin, sphalbt, normext, tsmax, tsmin, nbfic,
nbfi, tts, indts, ttv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, &raot,
&residual, &iaots, epsmin);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc,
troatm, tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef,
satm_coef, normext_p0a3_arr, &raot, &residual, &iaots,
epsmin);
}
else if (epsmin <= LOW_EPS)
{
eps = eps1;
residual = residual1;
raot = sraot1;
}
else if (epsmin >= HIGH_EPS)
{
eps = eps3;
residual = residual3;
raot = sraot3;
}
teps[center_pix] = eps;
taero[center_pix] = raot;
if (use_orig_aero)
corf = raot / xmus;
else
corf = raot / xmus_center;
/* Check the model residual. Corf represents aerosol impact.
Test the quality of the aerosol inversion. */
if (residual < (0.015 + 0.005 * corf + 0.10 * troatm[DNL_BAND7]))
{
/* Test if NIR band 5 makes sense */
iband = DNL_BAND5;
rotoa = aerob5[curr_pix];
raot550nm = raot;
if (use_orig_aero)
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus,
xmuv, xfi, cosxfi, raot550nm, iband, pres, tpres,
aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi,
tts, indts, ttv, uoz, uwv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, rotoa,
&roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[iband],
aot550nm[roatm_iaMax[iband]], &roatm_coef[iband][0],
&ttatmg_coef[iband][0], &satm_coef[iband][0], raot550nm,
iband, normext_p0a3_arr[iband], rotoa, &roslamb, eps);
ros5 = roslamb;
/* Test if red band 4 makes sense */
iband = DNL_BAND4;
rotoa = aerob4[curr_pix];
raot550nm = raot;
if (use_orig_aero)
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus,
xmuv, xfi, cosxfi, raot550nm, iband, pres, tpres,
aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi,
tts, indts, ttv, uoz, uwv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, rotoa,
&roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[iband],
aot550nm[roatm_iaMax[iband]], &roatm_coef[iband][0],
&ttatmg_coef[iband][0], &satm_coef[iband][0], raot550nm,
iband, normext_p0a3_arr[iband], rotoa, &roslamb, eps);
ros4 = roslamb;
/* Use the NDVI to validate the reflectance values or flag
as water */
if ((ros5 > 0.1) && ((ros5 - ros4) / (ros5 + ros4) > 0))
{
/* Clear pixel with valid aerosol retrieval */
ipflag[center_pix] |= (1 << IPFLAG_CLEAR);
}
else
{
/* Flag as water */
ipflag[center_pix] |= (1 << IPFLAG_WATER);
}
}
else
{
/* Flag as water */
ipflag[center_pix] |= (1 << IPFLAG_WATER);
}
/* Retest any water pixels to verify they are water and obtain
their aerosol */
if (lasrc_qa_is_water(ipflag[center_pix]))
{
/* Initialize the band ratios */
for (ib = 0; ib < NSR_BANDS; ib++)
erelc[ib] = -1.0;
troatm[DNL_BAND1] = aerob1[curr_pix];
troatm[DNL_BAND4] = aerob4[curr_pix];
troatm[DNL_BAND5] = aerob5[curr_pix];
troatm[DNL_BAND7] = aerob7[curr_pix];
/* Set the band ratio - coastal aerosol, red, NIR, SWIR */
erelc[DNL_BAND1] = 1.0;
erelc[DNL_BAND4] = 1.0;
erelc[DNL_BAND5] = 1.0;
erelc[DNL_BAND7] = 1.0;
/* Retrieve the water aerosol information for eps 1.5 */
eps = WATER_EPS;
iaots = 0;
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, true, iband1, xts,
xtv, xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc,
troatm, tpres, rolutt, transt, xtsstep, xtsmin,
xtvstep, xtvmin, sphalbt, normext, tsmax, tsmin, nbfic,
nbfi, tts, indts, ttv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, &raot,
&residual, &iaots, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, true, iband1, erelc,
troatm, tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef,
satm_coef, normext_p0a3_arr, &raot, &residual, &iaots,
eps);
teps[center_pix] = eps;
taero[center_pix] = raot;
if (use_orig_aero)
corf = raot / xmus;
else
corf = raot / xmus_center;
/* Test band 1 reflectance to eliminate negative */
iband = DNL_BAND1;
rotoa = aerob1[curr_pix];
raot550nm = raot;
if (use_orig_aero)
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus,
xmuv, xfi, cosxfi, raot550nm, iband, pres, tpres,
aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi,
tts, indts, ttv, uoz, uwv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, rotoa,
&roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[iband],
aot550nm[roatm_iaMax[iband]], &roatm_coef[iband][0],
&ttatmg_coef[iband][0], &satm_coef[iband][0], raot550nm,
iband, normext_p0a3_arr[iband], rotoa, &roslamb, eps);
ros1 = roslamb;
if (residual > (0.010 + 0.005 * corf) || ros1 < 0)
{
/* Not a valid water pixel (possibly urban). Clear all
the QA bits, and leave the IPFLAG_CLEAR bit off to
indicate the aerosol retrieval was not valid. */
ipflag[center_pix] = 0; /* IPFLAG_CLEAR bit is 0 */
}
else
{
/* Valid water pixel. Set the clear aerosol retrieval bit
and turn on the water bit. */
ipflag[center_pix] = (1 << IPFLAG_CLEAR);
ipflag[center_pix] |= (1 << IPFLAG_WATER);
}
} /* if water pixel */
/* Reset the looping variables to the center of the aerosol window
versus the actual non-fill/non-cloud pixel that was processed
so that we get the correct center for the next aerosol window */
i = center_line;
j = center_samp;
curr_pix = center_pix;
} /* end for j */
} /* end for i */
#ifndef _OPENMP
/* update status */
printf ("100%%\n");
fflush (stdout);
#endif
/* Done with the aerob* arrays */
free (aerob1); aerob1 = NULL;
free (aerob2); aerob2 = NULL;
free (aerob4); aerob4 = NULL;
free (aerob5); aerob5 = NULL;
free (aerob7); aerob7 = NULL;
/* Done with the ratiob* arrays */
free (andwi); andwi = NULL;
free (sndwi); sndwi = NULL;
free (ratiob1); ratiob1 = NULL;
free (ratiob2); ratiob2 = NULL;
free (ratiob7); ratiob7 = NULL;
free (intratiob1); intratiob1 = NULL;
free (intratiob2); intratiob2 = NULL;
free (intratiob7); intratiob7 = NULL;
free (slpratiob1); slpratiob1 = NULL;
free (slpratiob2); slpratiob2 = NULL;
free (slpratiob7); slpratiob7 = NULL;
/* Done with the DEM, water vapor, and ozone arrays */
free (dem); dem = NULL;
free (wv); wv = NULL;
free (oz); oz = NULL;
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag.img", "w");
fwrite (ipflag, npixels, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols.img", "w");
fwrite (taero, npixels, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Replace the invalid aerosol retrievals (taero and teps) with a local
average of those values */
mytime = time(NULL);
printf ("Filling invalid aerosol values in the 3x3 windows %s",
ctime(&mytime));
retval = fix_invalid_aerosols_landsat (ipflag, taero, teps, LAERO_WINDOW,
LHALF_AERO_WINDOW, nlines, nsamps);
if (retval != SUCCESS)
{
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag_filled.img", "w");
fwrite (ipflag, npixels, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols_filled.img", "w");
fwrite (taero, npixels, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Use the center of the aerosol windows to interpolate the remaining
pixels in the window for taero */
mytime = time(NULL);
printf ("Interpolating the aerosol values in the 3x3 windows %s",
ctime(&mytime));
aerosol_interp_landsat (xml_metadata, LAERO_WINDOW, LHALF_AERO_WINDOW,
qaband, ipflag, taero, nlines, nsamps);
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag_final.img", "w");
fwrite (ipflag, npixels, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols_final.img", "w");
fwrite (taero, npixels, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Use the center of the aerosol windows to interpolate the teps values
(angstrom coefficient). The median value used for filling in clouds and
water will be the default eps value. */
mytime = time(NULL);
printf ("Interpolating the teps values in the 3x3 windows %s",
ctime(&mytime));
aerosol_interp_landsat (xml_metadata, LAERO_WINDOW, LHALF_AERO_WINDOW,
qaband, ipflag, teps, nlines, nsamps);
/* Perform the second level of atmospheric correction using the aerosols */
mytime = time(NULL);
printf ("Performing atmospheric correction ... %s", ctime(&mytime));
/* 0 .. DNL_BAND7 is the same as 0 .. SRL_BAND7 here, since the pan band
isn't spanned */
for (ib = 0; ib <= DNL_BAND7; ib++)
{
#ifdef _OPENMP
#pragma omp parallel for private (i, rsurf, rotoa, raot550nm, eps, xtv, xmuv, xts, xfi, cosxfi, pres, uwv, uoz, retval, tmpf, roslamb, tgo, roatm, ttatmg, satm, xrorayp)
#endif
for (i = 0; i < npixels; i++)
{
/* If this pixel is fill, then don't process */
if (level1_qa_is_fill (qaband[i]))
continue;
/* Correct all pixels */
rsurf = sband[ib][i];
rotoa = (rsurf * bttatmg[ib] / (1.0 - bsatm[ib] * rsurf) +
broatm[ib]) * btgo[ib];
raot550nm = taero[i];
eps = teps[i];
if (use_orig_aero)
{
/* Determine the solar and view angles for the current pixel */
xtv = vza[i] * 0.01;
xmuv = cos(xtv * DEG2RAD);
xts = sza[i] * 0.01;
xmus = cos(xts * DEG2RAD);
xfi = saa[i] * 0.01 - vaa[i] * 0.01;
cosxfi = cos(xfi * DEG2RAD);
pres = tp[i];
uwv = twvi[i];
uoz = tozi[i];
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus, xmuv,
xfi, cosxfi, raot550nm, ib, pres, tpres, aot550nm,
rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin, sphalbt,
normext, tsmax, tsmin, nbfic, nbfi, tts, indts, ttv, uoz,
uwv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, rotoa, &roslamb, &tgo, &roatm, &ttatmg,
&satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[ib],
aot550nm[roatm_iaMax[ib]], &roatm_coef[ib][0],
&ttatmg_coef[ib][0], &satm_coef[ib][0], raot550nm, ib,
normext_p0a3_arr[ib], rotoa, &roslamb, eps);
/* If this is the coastal aerosol band then set the aerosol
bits in the QA band */
if (ib == DNL_BAND1)
{
/* Set up aerosol QA bits */
tmpf = fabs (rsurf - roslamb);
if (tmpf <= LOW_AERO_THRESH)
{ /* Set the first aerosol bit (low aerosols) */
ipflag[i] |= (1 << AERO1_QA);
}
else
{
if (tmpf < AVG_AERO_THRESH)
{ /* Set the second aerosol bit (average aerosols) */
ipflag[i] |= (1 << AERO2_QA);
}
else
{ /* Set both aerosol bits (high aerosols) */
ipflag[i] |= (1 << AERO1_QA);
ipflag[i] |= (1 << AERO2_QA);
}
}
} /* end if this is the coastal aerosol band */
/* Save the unscaled surface reflectance value */
if (roslamb < MIN_VALID_REFL)
sband[ib][i] = MIN_VALID_REFL;
else if (roslamb > MAX_VALID_REFL)
sband[ib][i] = MAX_VALID_REFL;
else
sband[ib][i] = roslamb;
} /* end for i */
} /* end for ib */
/* Free memory for arrays no longer needed */
if (use_orig_aero)
{
free (twvi);
free (tozi);
free (tp);
}
free (taero);
free (teps);
/* Write the data to the output file */
mytime = time(NULL);
printf ("Writing surface reflectance corrected data to the output "
"files ... %s", ctime(&mytime));
/* Open the output file */
sr_output = open_output (xml_metadata, input, OUTPUT_SR);
if (sr_output == NULL)
{ /* error message already printed */
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Loop through the reflectance bands and write the data */
for (ib = 0; ib <= DNL_BAND7; ib++)
{
/* Scale the output data from float to int16 */
convert_output (sband, ib, nlines, nsamps, false, out_band);
/* Write the scaled product */
if (put_output_lines (sr_output, out_band, ib, 0, nlines,
sizeof (uint16)) != SUCCESS)
{
sprintf (errmsg, "Writing output data for band %d", ib);
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Create the ENVI header file this band */
if (create_envi_struct (&sr_output->metadata.band[ib],
&xml_metadata->global, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Creating ENVI header structure.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Write the ENVI header */
strcpy (envi_file, sr_output->metadata.band[ib].file_name);
cptr = strchr (envi_file, '.');
strcpy (cptr, ".hdr");
if (write_envi_hdr (envi_file, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Writing ENVI header file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
}
/* Append the surface reflectance bands (1-7) to the XML file */
if (append_metadata (7, sr_output->metadata.band, xml_infile) !=
SUCCESS)
{
sprintf (errmsg, "Appending surface reflectance bands to the "
"XML file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Write the aerosol QA band */
printf (" Aerosol Band %d: %s\n", SRL_AEROSOL+1,
sr_output->metadata.band[SRL_AEROSOL].file_name);
if (put_output_lines (sr_output, ipflag, SRL_AEROSOL, 0, nlines,
sizeof (uint8)) != SUCCESS)
{
sprintf (errmsg, "Writing aerosol QA output data");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Free memory for ipflag data */
free (ipflag);
/* Create the ENVI header for the aerosol QA band */
if (create_envi_struct (&sr_output->metadata.band[SRL_AEROSOL],
&xml_metadata->global, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Creating ENVI header structure.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Write the ENVI header */
strcpy (envi_file, sr_output->metadata.band[SRL_AEROSOL].file_name);
cptr = strchr (envi_file, '.');
strcpy (cptr, ".hdr");
if (write_envi_hdr (envi_file, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Writing ENVI header file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Append the aerosol QA band to the XML file */
if (append_metadata (1, &sr_output->metadata.band[SRL_AEROSOL],
xml_infile) != SUCCESS)
{
sprintf (errmsg, "Appending aerosol QA band to XML file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Close the output surface reflectance products */
close_output (sat, sr_output, OUTPUT_SR);
free_output (sr_output, OUTPUT_SR);
/* Free the spatial mapping pointer */
free (space);
/* Free the data arrays */
free (rolutt);
free (transt);
free (sphalbt);
free (normext);
free (tsmax);
free (tsmin);
free (nbfic);
free (nbfi);
free (ttv);
/* Successful completion */
mytime = time(NULL);
printf ("Surface reflectance correction complete ... %s\n", ctime(&mytime));
return (SUCCESS);
}
|
hermm_c_coo_n_lo_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
alphasparse_status_t
ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    /* Hermitian sparse-times-dense product, COO storage, lower triangle
       stored, row-major dense operands:
           y := alpha * A * x + beta * y
       where A is Hermitian and only entries with col <= row are present.
       x has `columns` columns with leading dimension ldx; y likewise with
       ldy.  Returns ALPHA_SPARSE_STATUS_SUCCESS unconditionally. */
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* First pass: scale the output in place, y = beta * y. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < mat->rows; i++)
        for (ALPHA_INT j = 0; j < columns; j++)
            alpha_mul(y[i * ldy + j], y[i * ldy + j], beta);

    /* Heuristic cap on the thread count for narrow right-hand sides, so
       each thread still gets a worthwhile slice of columns. */
    num_threads = ((columns/512*24 + 24) < num_threads) ? (columns/512*24 + 24) : num_threads;
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        /* Each thread owns the contiguous column slice [bcl, bch) of x and
           y, so no two threads ever update the same y element even though
           every thread scans the full nonzero list. */
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT bcl = cross_block_low(tid, num_threads, columns);
        ALPHA_INT bch = cross_block_high(tid, num_threads, columns);
        for (ALPHA_INT ai = 0; ai < mat->nnz; ai++)
        {
            ALPHA_INT ac = mat->col_indx[ai];
            ALPHA_INT r = mat->row_indx[ai];
            if (ac < r)
            {
                /* Strictly-lower entry A(r,ac): apply it as stored for row r
                   and conjugated for the implicit mirror entry A(ac,r). */
                ALPHA_Number val;
                alpha_mul(val, alpha, mat->values[ai]);
                ALPHA_Number val_c;
                alpha_conj(val_c,mat->values[ai]);
                alpha_mul(val_c, alpha, val_c);
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]);
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(ac, c, ldy)], val_c, x[index2(r, c, ldx)]);
            }
            else if (ac == r)
            {
                /* Diagonal entry contributes exactly once. */
                ALPHA_Number val;
                alpha_mul(val, alpha, mat->values[ai]);
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]);
            }
            /* Entries with ac > r belong to the (unstored) upper triangle
               and are ignored for the _lo variant. */
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GB_binop__bxnor_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_uint16)
// A*D function (colscale): GB (_AxD__bxnor_uint16)
// D*A function (rowscale): GB (_DxB__bxnor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_uint16)
// C=scalar+B GB (_bind1st__bxnor_uint16)
// C=scalar+B' GB (_bind1st_tran__bxnor_uint16)
// C=A+scalar GB (_bind2nd__bxnor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bxnor_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = ~((aij) ^ (bij))
// type of the entries of A
#define GB_ATYPE \
uint16_t
// type of the entries of B
#define GB_BTYPE \
uint16_t
// type of the entries of C
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// Cx [p]: the pth value of C
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_UINT16 || GxB_NO_BXNOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// BXNOR is not in that list, so this kernel is compiled out (#if 0)
// and the function name is left as GB ((none)).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask, no accumulator.
// The numeric work is done by the included template, driven by the
// GB_BINOP / GB_GETA / GB_GETB macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B has been pre-sliced into
// B_ntasks tasks (B_ek_slicing) to be run on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar, passed as an untyped pointer.
GrB_Info GB (_Cdense_accumb__bxnor_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block above always returns.
// Kept as emitted by the code generator; do not hand-edit this file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D where D is a diagonal matrix (column scale); A pre-sliced into
// A_ntasks tasks for A_nthreads threads.
GrB_Info GB (_AxD__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx: the values of C, written by the colscale template below
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B where D is a diagonal matrix (row scale).
GrB_Info GB (_DxB__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx: the values of C, written by the rowscale template below
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, optionally masked (M, complemented or structural),
// over a pre-computed task list (TaskList, C_ntasks, C_nthreads).
GrB_Info GB (_AaddB__bxnor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspace for M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse or hypersparse (see the banner at the top of this file).
GrB_Info GB (_AemultB_08__bxnor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  flipxy asks for fmult(y,x) instead of fmult(x,y).
GrB_Info GB (_AemultB_02__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for BXNOR (z = ~(x^y) is symmetric in x and y),
// so only the non-flipped branch below is compiled in.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full; M pre-sliced into M_ntasks tasks.
GrB_Info GB (_AemultB_04__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is bitmap.
GrB_Info GB (_AemultB_bitmap__bxnor_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bxnor (x, Bx [p]) for every entry present in B, with the
// scalar x bound as the first operand.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__bxnor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    const uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // operate only on entries present in B (per the Bb bitmap)
        if (GBB (Bb, p))
        {
            uint16_t bij = GBX (Bx, p, false) ;
            Cx [p] = ~((x) ^ (bij)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bxnor (Ax [p], y) for every entry present in A, with the
// scalar y bound as the second operand.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__bxnor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    const uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // operate only on entries present in A (per the Ab bitmap)
        if (GBB (Ab, p))
        {
            uint16_t aij = GBX (Ax, p, false) ;
            Cx [p] = ~((aij) ^ (y)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__bxnor_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE; the definition is identical here because x and
// the entries of A share the same type (uint16_t) for this operator
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
// A is the 1st input here, so GB_ATYPE already matches and no
// redefinition is needed (unlike _bind1st_tran above).
GrB_Info GB (_bind2nd_tran__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_kroner.c | //------------------------------------------------------------------------------
// GB_kroner: Kronecker product, C = kron (A,B)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// C = kron(A,B) where op determines the binary multiplier to use. The type of
// A and B are compatible with the x and y inputs of z=op(x,y), but can be
// different. The type of C is the type of z. C is hypersparse if either A
// or B are hypersparse.
// FUTURE: GB_kron would be faster with built-in types and operators.
// FUTURE: at most one thread is used for each vector of C=kron(A,B). The
// matrix C is normally very large, but if both A and B are n-by-1, then C is
// n^2-by-1 and only a single thread is used. A better method for this case
// would construct vectors of C in parallel.
// FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not
// accounted for in the parallel load-balancing.
#include "GB_kron.h"
GrB_Info GB_kroner                  // C = kron (A,B)
(
    GrB_Matrix *Chandle,            // output matrix
    const bool C_is_csc,            // desired format of C
    const GrB_BinaryOp op,          // multiply operator, z = op (x, y)
    const GrB_Matrix A,             // input matrix
    const GrB_Matrix B,             // input matrix
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (Chandle != NULL) ;
    ASSERT_MATRIX_OK (A, "A for kron (A,B)", GB0) ;
    ASSERT_MATRIX_OK (B, "B for kron (A,B)", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for kron (A,B)", GB0) ;
    ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    (*Chandle) = NULL ;

    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ah = A->h ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const GB_void *GB_RESTRICT Ax = (GB_void *) A->x ;
    const int64_t asize = A->type->size ;
    const int64_t avlen = A->vlen ;
    const int64_t avdim = A->vdim ;
    int64_t anvec = A->nvec ;
    int64_t anz = GB_NNZ (A) ;

    const int64_t *GB_RESTRICT Bp = B->p ;
    const int64_t *GB_RESTRICT Bh = B->h ;
    const int64_t *GB_RESTRICT Bi = B->i ;
    const GB_void *GB_RESTRICT Bx = (GB_void *) B->x ;
    const int64_t bsize = B->type->size ;
    const int64_t bvlen = B->vlen ;
    const int64_t bvdim = B->vdim ;
    int64_t bnvec = B->nvec ;
    int64_t bnz = GB_NNZ (B) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    // total work: one op application per (entry of A) x (entry of B), plus
    // one counting pass per (vector of A) x (vector of B)
    double work = ((double) anz) * ((double) bnz)
                + (((double) anvec) * ((double) bnvec)) ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (work, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // allocate the output matrix C
    //--------------------------------------------------------------------------

    // C has the same type as z for the multiply operator, z=op(x,y)
    GrB_Index cvlen, cvdim, cnzmax, cnvec ;
    bool ok = GB_Index_multiply (&cvlen, avlen, bvlen) ;
    ok = ok & GB_Index_multiply (&cvdim, avdim, bvdim) ;
    ok = ok & GB_Index_multiply (&cnzmax, anz, bnz) ;
    ok = ok & GB_Index_multiply (&cnvec, anvec, bnvec) ;
    // NOTE(review): index-overflow in the dimensions of C is only caught by
    // this assert in debug builds; in production ok is not otherwise checked
    ASSERT (ok) ;

    // C is hypersparse if either A or B are hypersparse
    bool C_is_hyper = (cvdim > 1) && (A->is_hyper || B->is_hyper) ;
    GrB_Matrix C = NULL ;           // allocate a new header for C
    info = GB_create (&C, op->ztype, (int64_t) cvlen, (int64_t) cvdim,
        GB_Ap_malloc, C_is_csc, GB_SAME_HYPER_AS (C_is_hyper), B->hyper_ratio,
        cnvec, cnzmax, true, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // get C
    //--------------------------------------------------------------------------

    int64_t *GB_RESTRICT Cp = C->p ;
    int64_t *GB_RESTRICT Ch = C->h ;
    int64_t *GB_RESTRICT Ci = C->i ;
    GB_void *GB_RESTRICT Cx = (GB_void *) C->x ;
    const int64_t csize = C->type->size ;

    GxB_binary_function fmult = op->function ;
    // typecasting from the A and B entry types to the op's x and y inputs
    GB_cast_function
        cast_A = GB_cast_factory (op->xtype->code, A->type->code),
        cast_B = GB_cast_factory (op->ytype->code, B->type->code) ;

    //--------------------------------------------------------------------------
    // compute the column counts of C, and C->h if C is hypersparse
    //--------------------------------------------------------------------------

    // vector kC of C pairs vector kA = kC/bnvec of A with kB = kC%bnvec of B
    int64_t kC ;
    #pragma omp parallel for num_threads(nthreads) schedule(guided)
    for (kC = 0 ; kC < cnvec ; kC++)
    {
        int64_t kA = kC / bnvec ;
        int64_t kB = kC % bnvec ;

        // get A(:,jA), the (kA)th vector of A
        int64_t jA = (Ah == NULL) ? kA : Ah [kA] ;
        int64_t aknz = Ap [kA+1] - Ap [kA] ;
        // get B(:,jB), the (kB)th vector of B
        int64_t jB = (Bh == NULL) ? kB : Bh [kB] ;
        int64_t bknz = Bp [kB+1] - Bp [kB] ;
        // determine # entries in C(:,jC), the (kC)th vector of C
        // int64_t kC = kA * bnvec + kB ;
        Cp [kC] = aknz * bknz ;
        if (C_is_hyper)
        {
            Ch [kC] = jA * bvdim + jB ;
        }
    }

    //--------------------------------------------------------------------------
    // replace Cp with its cumulative sum
    //--------------------------------------------------------------------------

    GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads) ;
    if (C_is_hyper) C->nvec = cnvec ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // C = kron (A,B)
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(guided)
    for (kC = 0 ; kC < cnvec ; kC++)
    {
        int64_t kA = kC / bnvec ;
        int64_t kB = kC % bnvec ;

        // get B(:,jB), the (kB)th vector of B
        int64_t pB_start = Bp [kB] ;
        int64_t pB_end   = Bp [kB+1] ;
        // bug fix: was (pB_start - pB_end), which is non-positive; only the
        // zero test below kept that from mattering.  bknz = # entries in
        // B(:,jB).
        int64_t bknz = pB_end - pB_start ;
        if (bknz == 0) continue ;
        GB_void bwork [GB_VLA(bsize)] ;

        // get C(:,jC), the (kC)th vector of C
        // int64_t kC = kA * bnvec + kB ;
        int64_t pC = Cp [kC] ;

        // get A(:,jA), the (kA)th vector of A
        int64_t pA_start = Ap [kA] ;
        int64_t pA_end   = Ap [kA+1] ;
        GB_void awork [GB_VLA(asize)] ;

        for (int64_t pA = pA_start ; pA < pA_end ; pA++)
        {
            // awork = A(iA,jA), typecasted to op->xtype
            int64_t iA = Ai [pA] ;
            int64_t iAblock = iA * bvlen ;
            cast_A (awork, Ax +(pA*asize), asize) ;
            for (int64_t pB = pB_start ; pB < pB_end ; pB++)
            {
                // bwork = B(iB,jB), typecasted to op->ytype
                int64_t iB = Bi [pB] ;
                cast_B (bwork, Bx +(pB*bsize), bsize) ;
                // C(iC,jC) = A(iA,jA) * B(iB,jB)
                int64_t iC = iAblock + iB ;
                Ci [pC] = iC ;
                fmult (Cx +(pC*csize), awork, bwork) ;
                pC++ ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    info = GB_hypermatrix_prune (C, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_MATRIX_FREE (&C) ;
        return (info) ;
    }
    ASSERT (C->nvec_nonempty == GB_nvec_nonempty (C, Context)) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (C, "C=kron(A,B)", GB0) ;
    (*Chandle) = C ;
    return (GrB_SUCCESS) ;
}
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/compare.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/statistic.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImageChannels() compares one or more image channels of an image
% to a reconstructed image and returns the difference image.
%
% The format of the CompareImageChannels method is:
%
% Image *CompareImageChannels(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: compare across all composite channels.
  */
  return(CompareImageChannels(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
MagickExport Image *CompareImageChannels(Image *image,
const Image *reconstruct_image,const ChannelType channel,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
Image
*difference_image,
*highlight_image;
MagickBooleanType
status;
MagickPixelPacket
highlight,
lowlight,
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
  Image geometries must match, except for the perceptual hash metric.
*/
if (metric != PerceptualHashErrorMetric)
if ((reconstruct_image->columns != image->columns) ||
(reconstruct_image->rows != image->rows))
ThrowImageException(ImageError,"ImageSizeDiffers");
status=GetImageChannelDistortion(image,reconstruct_image,channel,metric,
distortion,exception);
if (status == MagickFalse)
return((Image *) NULL);
difference_image=CloneImage(image,0,0,MagickTrue,exception);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel);
highlight_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse)
{
InheritException(exception,&highlight_image->exception);
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel);
/*
  Default colors, overridable via the "highlight-color" and
  "lowlight-color" image artifacts.
*/
(void) QueryMagickColor("#f1001ecc",&highlight,exception);
artifact=GetImageArtifact(image,"highlight-color");
if (artifact != (const char *) NULL)
(void) QueryMagickColor(artifact,&highlight,exception);
(void) QueryMagickColor("#ffffffcc",&lowlight,exception);
artifact=GetImageArtifact(image,"lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryMagickColor(artifact,&lowlight,exception);
if (highlight_image->colorspace == CMYKColorspace)
{
ConvertRGBToCMYK(&highlight);
ConvertRGBToCMYK(&lowlight);
}
/*
Generate difference image.
*/
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,highlight_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel,
reconstruct_pixel;
register const IndexPacket
*restrict indexes,
*restrict reconstruct_indexes;
register const PixelPacket
*restrict p,
*restrict q;
register IndexPacket
*restrict highlight_indexes;
register PixelPacket
*restrict r;
register ssize_t
x;
/* a failure on another row aborts the remaining rows */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,image->columns,1,
exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,highlight_image->columns,
1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view);
pixel=zero;
reconstruct_pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickStatusType
difference;
SetMagickPixelPacket(image,p,indexes+x,&pixel);
SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
&reconstruct_pixel);
/*
  A pixel differs if the fuzzy color test fails (all channels), or if
  any of the selected channels differ exactly.
*/
difference=MagickFalse;
if (channel == CompositeChannels)
{
if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
difference=MagickTrue;
}
else
{
if (((channel & RedChannel) != 0) &&
(GetPixelRed(p) != GetPixelRed(q)))
difference=MagickTrue;
if (((channel & GreenChannel) != 0) &&
(GetPixelGreen(p) != GetPixelGreen(q)))
difference=MagickTrue;
if (((channel & BlueChannel) != 0) &&
(GetPixelBlue(p) != GetPixelBlue(q)))
difference=MagickTrue;
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse) &&
(GetPixelOpacity(p) != GetPixelOpacity(q)))
difference=MagickTrue;
if ((((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace) &&
(reconstruct_image->colorspace == CMYKColorspace)) &&
(GetPixelIndex(indexes+x) !=
GetPixelIndex(reconstruct_indexes+x)))
difference=MagickTrue;
}
if (difference != MagickFalse)
SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x);
else
SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x);
p++;
q++;
r++;
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
  Composite the highlight image onto the cloned input to form the
  difference image, using the image's own compose method.
*/
(void) CompositeImage(difference_image,image->compose,highlight_image,0,0);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortion() compares one or more image channels of an image
% to a reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageChannelDistortion method is:
%
% MagickBooleanType GetImageChannelDistortion(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: measure distortion across all composite channels.
  */
  return(GetImageChannelDistortion(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,const ChannelType channel,double *distortion,
ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
MagickPixelPacket
zero;
ssize_t
y;
/*
Compute the absolute difference in pixels between two images.
*/
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
/* per-row tallies, merged into *distortion under the critical section */
double
channel_distortion[CompositeChannels+1];
MagickPixelPacket
pixel,
reconstruct_pixel;
register const IndexPacket
*restrict indexes,
*restrict reconstruct_indexes;
register const PixelPacket
*restrict p,
*restrict q;
register ssize_t
i,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
pixel=zero;
reconstruct_pixel=pixel;
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
&reconstruct_pixel);
/* count a mismatch once for each selected channel, plus the composite */
if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
{
if ((channel & RedChannel) != 0)
channel_distortion[RedChannel]++;
if ((channel & GreenChannel) != 0)
channel_distortion[GreenChannel]++;
if ((channel & BlueChannel) != 0)
channel_distortion[BlueChannel]++;
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
channel_distortion[OpacityChannel]++;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
channel_distortion[BlackChannel]++;
channel_distortion[CompositeChannels]++;
}
p++;
q++;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetAbsoluteError)
#endif
for (i=0; i <= (ssize_t) CompositeChannels; i++)
distortion[i]+=channel_distortion[i];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(status);
}
static size_t GetNumberChannels(const Image *image,const ChannelType channel)
{
size_t
channels;
channels=0;
if ((channel & RedChannel) != 0)
channels++;
if ((channel & GreenChannel) != 0)
channels++;
if ((channel & BlueChannel) != 0)
channels++;
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
channels++;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
channels++;
return(channels == 0 ? 1 : channels);
}
/*
  GetFuzzDistortion() accumulates, for each selected channel, the mean of
  the squared, alpha-weighted, quantum-normalized pixel differences between
  image and reconstruct_image.  Rows are processed in parallel when OpenMP
  is available; each row sums into a private channel_distortion[] buffer
  which is folded into distortion[] inside a critical section.  The
  composite entry is finally normalized by the channel count and converted
  to a root-mean value.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row partial sums */

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    /* another row already failed; skip remaining work (can't break in omp) */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* alpha factors normalized to [0,1]; fully opaque when no matte */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /* opacity difference is not alpha-weighted; compared directly */
      if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) ||
          (reconstruct_image->matte != MagickFalse)))
        {
          distance=QuantumScale*((image->matte != MagickFalse ?
            GetPixelOpacity(p) : OpaqueOpacity)-
            (reconstruct_image->matte != MagickFalse ?
            GetPixelOpacity(q): OpaqueOpacity));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /* black channel requires both images to be CMYK */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-
            Da*GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    /* merge this row's partial sums into the shared totals */
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* normalize per-pixel, then per-channel for the composite entry */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) image->columns*image->rows);
  /* NOTE(review): when opacity participates, the composite divides by one
     fewer channel — presumably to exclude opacity from the mean; confirm */
  if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) ||
      (reconstruct_image->matte != MagickFalse)))
    distortion[CompositeChannels]/=(double)
      (GetNumberChannels(image,channel)-1);
  else
    distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() computes, for each selected channel, the mean
  of the absolute, quantum-normalized pixel differences between image and
  reconstruct_image.  Rows run in parallel when OpenMP is available; each
  row accumulates into a private buffer that is merged under a critical
  section.  Per-channel color terms are alpha-weighted; the opacity term is
  compared directly.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row partial sums */

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    /* another row already failed; skip remaining work (can't break in omp) */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* alpha factors normalized to [0,1]; fully opaque when no matte */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          /* cast forces floating-point subtraction of unsigned quantums */
          distance=QuantumScale*fabs(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    /* merge this row's partial sums into the shared totals */
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* normalize per-pixel, then per-channel for the composite entry */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) image->columns*image->rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
/*
  GetMeanErrorPerPixel() accumulates per-channel absolute differences into
  distortion[] and, as a side effect, records the mean error per pixel,
  normalized mean error, and normalized maximum error in image->error.
  This metric runs serially (no OpenMP) because mean_error, maximum_error
  and area are scalar accumulators updated across rows.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  MagickRealType
    area,           /* number of channel samples compared */
    maximum_error,  /* largest single-channel absolute difference */
    mean_error;     /* sum of squared differences */

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        break;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* alpha factors normalized to [0,1]; fully opaque when no matte */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          distortion[RedChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          distortion[GreenChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          distortion[BlueChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=fabs((double) GetPixelOpacity(p)-
            GetPixelOpacity(q));
          distortion[OpacityChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          distortion[BlackChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* NOTE(review): area is 0 if no channels are selected or the image is
     empty, making these divisions produce inf/NaN — confirm callers
     guarantee at least one sample */
  image->error.mean_error_per_pixel=distortion[CompositeChannels]/area;
  image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() computes, for each selected channel, the mean
  of the squared, quantum-normalized pixel differences between image and
  reconstruct_image.  Rows run in parallel when OpenMP is available; each
  row accumulates into a private buffer that is merged under a critical
  section.  Also the basis for the PSNR and RMSE metrics below.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row partial sums */

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    /* another row already failed; skip remaining work (can't break in omp) */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* alpha factors normalized to [0,1]; fully opaque when no matte */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          /* cast forces floating-point subtraction of unsigned quantums */
          distance=QuantumScale*(GetPixelOpacity(p)-(MagickRealType)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    /* merge this row's partial sums into the shared totals */
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* normalize per-pixel, then per-channel for the composite entry */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) image->columns*image->rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes the normalized
  cross-correlation between image and reconstruct_image for each selected
  channel: the covariance of corresponding pixel values (each offset by its
  image-wide mean) divided by the product of the two standard deviations.
  Means/deviations come from GetImageChannelStatistics(), so lighting and
  exposure variation between the two images is factored out.  Runs
  serially and reports progress through the image's progress monitor.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    area;  /* 1/(N-1): sample-covariance normalization factor */

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageChannelStatistics(image,exception);
  reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* release whichever allocation succeeded before bailing out */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]=0.0;
  area=1.0/((MagickRealType) image->columns*image->rows-1);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        Da,
        Sa;

      /* alpha factors normalized to [0,1]; fully opaque when no matte */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      /* accumulate covariance: (value - mean) * (value' - mean') */
      if ((channel & RedChannel) != 0)
        distortion[RedChannel]+=area*QuantumScale*(Sa*GetPixelRed(p)-
          image_statistics[RedChannel].mean)*(Da*GetPixelRed(q)-
          reconstruct_statistics[RedChannel].mean);
      if ((channel & GreenChannel) != 0)
        distortion[GreenChannel]+=area*QuantumScale*(Sa*GetPixelGreen(p)-
          image_statistics[GreenChannel].mean)*(Da*GetPixelGreen(q)-
          reconstruct_statistics[GreenChannel].mean);
      if ((channel & BlueChannel) != 0)
        distortion[BlueChannel]+=area*QuantumScale*(Sa*GetPixelBlue(p)-
          image_statistics[BlueChannel].mean)*(Da*GetPixelBlue(q)-
          reconstruct_statistics[BlueChannel].mean);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        distortion[OpacityChannel]+=area*QuantumScale*(
          GetPixelOpacity(p)-image_statistics[OpacityChannel].mean)*
          (GetPixelOpacity(q)-reconstruct_statistics[OpacityChannel].mean);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        distortion[BlackChannel]+=area*QuantumScale*(Sa*
          GetPixelIndex(indexes+x)-image_statistics[BlackChannel].mean)*(Da*
          GetPixelIndex(reconstruct_indexes+x)-
          reconstruct_statistics[BlackChannel].mean);
      p++;
      q++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* honor a user-requested cancel by failing the comparison */
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    double
      gamma;

    /* PerceptibleReciprocal guards against division by ~zero deviation */
    gamma=image_statistics[i].standard_deviation*
      reconstruct_statistics[i].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
  }
  /* composite value: RMS of the enabled per-channel correlations */
  distortion[CompositeChannels]=0.0;
  if ((channel & RedChannel) != 0)
    distortion[CompositeChannels]+=distortion[RedChannel]*
      distortion[RedChannel];
  if ((channel & GreenChannel) != 0)
    distortion[CompositeChannels]+=distortion[GreenChannel]*
      distortion[GreenChannel];
  if ((channel & BlueChannel) != 0)
    distortion[CompositeChannels]+=distortion[BlueChannel]*
      distortion[BlueChannel];
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[CompositeChannels]+=distortion[OpacityChannel]*
      distortion[OpacityChannel];
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[CompositeChannels]+=distortion[BlackChannel]*
      distortion[BlackChannel];
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]/
    GetNumberChannels(image,channel));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() finds, for each selected channel, the largest
  absolute quantum-normalized pixel difference between image and
  reconstruct_image.  Rows run in parallel when OpenMP is available; each
  row tracks its own maxima which are folded into distortion[] with a
  max-merge under a critical section (not a sum, unlike the other metrics).
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row channel maxima */

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    /* another row already failed; skip remaining work (can't break in omp) */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* alpha factors normalized to [0,1]; fully opaque when no matte */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          if (distance > channel_distortion[RedChannel])
            channel_distortion[RedChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          if (distance > channel_distortion[GreenChannel])
            channel_distortion[GreenChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          if (distance > channel_distortion[BlueChannel])
            channel_distortion[BlueChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          /* cast forces floating-point subtraction of unsigned quantums */
          distance=QuantumScale*fabs(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          if (distance > channel_distortion[OpacityChannel])
            channel_distortion[OpacityChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          if (distance > channel_distortion[BlackChannel])
            channel_distortion[BlackChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    /* max-merge this row's maxima into the shared result */
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      if (channel_distortion[i] > distortion[i])
        distortion[i]=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio() converts the per-channel mean squared error
  into a PSNR value in decibels: 20*log10(MAX/RMSE) with the peak value
  normalized to 1.0.  MagickLog10() clamps near-zero arguments, bounding
  the reported PSNR for identical images.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* start from the mean squared error, then convert each entry in place */
  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=20.0*MagickLog10((double) 1.0/sqrt(
      distortion[RedChannel]));
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=20.0*MagickLog10((double) 1.0/sqrt(
      distortion[GreenChannel]));
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=20.0*MagickLog10((double) 1.0/sqrt(
      distortion[BlueChannel]));
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    distortion[OpacityChannel]=20.0*MagickLog10((double) 1.0/sqrt(
      distortion[OpacityChannel]));
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=20.0*MagickLog10((double) 1.0/sqrt(
      distortion[BlackChannel]));
  distortion[CompositeChannels]=20.0*MagickLog10((double) 1.0/sqrt(
    distortion[CompositeChannels]));
  return(status);
}
/*
  GetPerceptualHashDistortion() compares the perceptual hashes of two
  images: for each selected channel it sums the squared differences of the
  seven image-moment values in both the sRGB (P array) and HCLp (Q array)
  colorspaces.  Images need not be the same size.  Returns MagickFalse
  only if a hash could not be computed.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *image_phash,
    *reconstruct_phash;

  double
    difference;

  register ssize_t
    i;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  image_phash=GetImageChannelPerceptualHash(image,exception);
  if (image_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImageChannelPerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      /* release the first hash before bailing out */
      image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
      return(MagickFalse);
    }
  for (i=0; i < 7; i++)
  {
    /*
      Compute sum of moment differences squared.
    */
    if ((channel & RedChannel) != 0)
      {
        difference=reconstruct_phash[RedChannel].P[i]-
          image_phash[RedChannel].P[i];
        distortion[RedChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & GreenChannel) != 0)
      {
        difference=reconstruct_phash[GreenChannel].P[i]-
          image_phash[GreenChannel].P[i];
        distortion[GreenChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & BlueChannel) != 0)
      {
        difference=reconstruct_phash[BlueChannel].P[i]-
          image_phash[BlueChannel].P[i];
        distortion[BlueChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      {
        difference=reconstruct_phash[OpacityChannel].P[i]-
          image_phash[OpacityChannel].P[i];
        distortion[OpacityChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    /* NOTE(review): this metric accumulates the black term into
       distortion[IndexChannel] while the other metrics use
       distortion[BlackChannel] — confirm these enum values alias */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      {
        difference=reconstruct_phash[IndexChannel].P[i]-
          image_phash[IndexChannel].P[i];
        distortion[IndexChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
  }
  /*
    Compute perceptual hash in the HCLP colorspace.
  */
  for (i=0; i < 7; i++)
  {
    /*
      Compute sum of moment differences squared.
    */
    if ((channel & RedChannel) != 0)
      {
        difference=reconstruct_phash[RedChannel].Q[i]-
          image_phash[RedChannel].Q[i];
        distortion[RedChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & GreenChannel) != 0)
      {
        difference=reconstruct_phash[GreenChannel].Q[i]-
          image_phash[GreenChannel].Q[i];
        distortion[GreenChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & BlueChannel) != 0)
      {
        difference=reconstruct_phash[BlueChannel].Q[i]-
          image_phash[BlueChannel].Q[i];
        distortion[BlueChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      {
        difference=reconstruct_phash[OpacityChannel].Q[i]-
          image_phash[OpacityChannel].Q[i];
        distortion[OpacityChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      {
        difference=reconstruct_phash[IndexChannel].Q[i]-
          image_phash[IndexChannel].Q[i];
        distortion[IndexChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() computes the RMSE metric: the mean squared
  error per channel, converted in place to its square root for each
  channel that participated in the comparison.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  /* only entries GetMeanSquaredDistortion() populated are converted */
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=sqrt(distortion[RedChannel]);
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=sqrt(distortion[GreenChannel]);
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=sqrt(distortion[BlueChannel]);
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    distortion[OpacityChannel]=sqrt(distortion[OpacityChannel]);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=sqrt(distortion[BlackChannel]);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
/*
  GetImageChannelDistortion() compares the selected channels of image and
  reconstruct_image using the requested metric and stores the composite
  distortion in *distortion; per-channel values are computed into a
  temporary array and discarded.  The result is also recorded as the
  image's "distortion" property.  Both images must have identical
  dimensions except for the perceptual-hash metric.
*/
MagickExport MagickBooleanType GetImageChannelDistortion(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* perceptual hash is size-independent; all other metrics require
     matching geometry */
  if (metric != PerceptualHashErrorMetric)
    if ((reconstruct_image->columns != image->columns) ||
        (reconstruct_image->rows != image->rows))
      ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  /* dispatch on metric; the default case guarantees status is assigned */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
  }
  /* only the composite value is returned to the caller */
  *distortion=channel_distortion[CompositeChannels];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  /* also expose the result as an image property for downstream queries */
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortions() compares the image channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageChannelDistortions method is:
%
% double *GetImageChannelDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageChannelDistortions() compares each channel of image against
  reconstruct_image and returns a newly allocated array of distortion values,
  one per channel plus a composite entry at index CompositeChannels.  The
  caller owns the returned memory; NULL is returned on failure.

  Fix: the second debug-trace block was a copy-paste of the first and logged
  image again; it now guards on and logs reconstruct_image.
*/
MagickExport double *GetImageChannelDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  if (reconstruct_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      reconstruct_image->filename);
  /*
    The perceptual-hash metric compares normalized image moments, so it does
    not require matching geometry; every other metric does.
  */
  if (metric != PerceptualHashErrorMetric)
    if ((reconstruct_image->columns != image->columns) ||
        (reconstruct_image->rows != image->rows))
      {
        (void) ThrowMagickException(&image->exception,GetMagickModule(),
          ImageError,"ImageSizeDiffers","`%s'",image->filename);
        return((double *) NULL);
      }
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(Image *image,
% const Image *reconstruct_image)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
*/
/*
  IsImagesEqual() sums per-channel absolute color differences between image
  and reconstruct_image, records the mean / normalized-mean / normalized-max
  errors in image->error, and returns MagickTrue only when every compared
  sample matched exactly.

  Fix: if no samples are compared (zero-row image or a failed pixel-cache
  read on the first row) `area` stays 0 and the divisions below produced NaN
  in image->error; the guard now reports zero error instead.
*/
MagickExport MagickBooleanType IsImagesEqual(Image *image,
  const Image *reconstruct_image)
{
  CacheView
    *image_view,
    *reconstruct_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      /*
        Per channel: sum of |delta| for the mean error, sum of squares for
        the normalized mean error, running max for the maximum error; area
        counts every sample compared.
      */
      distance=fabs(GetPixelRed(p)-(double) GetPixelRed(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelGreen(p)-(double) GetPixelGreen(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelBlue(p)-(double) GetPixelBlue(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      if (image->matte != MagickFalse)
        {
          distance=fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reconstruct_indexes+x));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Guard against division by zero when no samples were compared; otherwise
    NaN would be stored in image->error and propagate to callers.
  */
  if (area == 0.0)
    area=1.0;
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
%  The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
%    o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Measure the distortion of the reference image against the region of image
  anchored at (x_offset,y_offset): crop that region out and compare it with
  GetImageDistortion().  Returns 0.0 if the crop cannot be produced.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *crop_image;

  RectangleInfo
    crop_geometry;

  SetGeometry(reference,&crop_geometry);
  crop_geometry.x=x_offset;
  crop_geometry.y=y_offset;
  crop_image=CropImage(image,&crop_geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  (void) GetImageDistortion(crop_image,reference,metric,&distortion,exception);
  crop_image=DestroyImage(crop_image);
  return(distortion);
}
/*
  SimilarityImage() is the metric-agnostic entry point: it delegates to
  SimilarityMetricImage() with the root-mean-squared-error metric.
*/
MagickExport Image *SimilarityImage(Image *image,const Image *reference,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
  return(SimilarityMetricImage(image,reference,RootMeanSquaredErrorMetric,
    offset,similarity_metric,exception));
}
/*
  Return the smaller of x and y.  The comparison is deliberately `x < y` so
  that a NaN argument yields y, matching the branch form of the original.
*/
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/*
  SimilarityMetricImage() evaluates the distortion of the reference image at
  every candidate (x,y) offset inside image, paints the per-offset similarity
  into the returned map image (white = exact match, darker = worse), and
  reports the best (smallest) metric and its offset through similarity_metric
  and offset.  Returns NULL on failure.
*/
MagickExport Image *SimilarityMetricImage(Image *image,const Image *reference,
  const MetricType metric,RectangleInfo *offset,double *similarity_metric,
  ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *similarity_view;

  const char
    *artifact;

  double
    similarity_threshold;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  /* Start from the worst possible value; the scan below minimizes it. */
  *similarity_metric=MagickMaximumValue;
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    ThrowImageException(ImageError,"ImageSizeDiffers");
  /* The map has one pixel per candidate offset. */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&similarity_image->exception);
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel);
  /*
    Measure similarity of reference image against image.  An artifact-supplied
    threshold enables early exit once any offset is similar enough; the
    default of -1.0 disables it (metrics are non-negative here).
  */
  similarity_threshold=(-1.0);
  artifact=GetImageArtifact(image,"compare:similarity-threshold");
  if (artifact != (const char *) NULL)
    similarity_threshold=StringToDouble(artifact,(char **) NULL);
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireVirtualCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    /* Another thread may already have found a good-enough match. */
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      /* New best match: remember the metric value and where it occurred. */
      if (similarity < *similarity_metric)
        {
          *similarity_metric=similarity;
          offset->x=x;
          offset->y=y;
        }
      /* NOTE(review): the 0.01 rescale presumably maps the perceptual-hash
         metric into [0,1] before painting -- confirm against that metric's
         value range. */
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      /* Paint the map pixel as a gray level: white for a perfect match. */
      SetPixelRed(q,ClampToQuantum(QuantumRange-QuantumRange*similarity));
      SetPixelGreen(q,GetPixelRed(q));
      SetPixelBlue(q,GetPixelRed(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
GB_unop__identity_fp64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp64_fp32
// op(A') function: GB_unop_tran__identity_fp64_fp32
// C type: double
// A type: float
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary op entrywise, casting each
// float entry of A to double.  (Auto-generated file: code left verbatim.)
GrB_Info GB_unop_apply__identity_fp64_fp32
(
    double *Cx,                     // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast reduces to a parallel memcpy
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting float entries to double.  The
// actual kernel is the shared template GB_unop_transpose.c, specialized by
// the GB_* macros defined above.  (Auto-generated file: code left verbatim.)
GrB_Info GB_unop_tran__identity_fp64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
kmp_set_dispatch_buf.c | // RUN: %libomp-compile
// RUN: env KMP_DISP_NUM_BUFFERS=0 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=1 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=3 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=4 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=7 %libomp-run
// RUN: %libomp-compile -DMY_SCHEDULE=guided
// RUN: env KMP_DISP_NUM_BUFFERS=1 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=3 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=4 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=7 %libomp-run
// UNSUPPORTED: clang-11, clang-12, clang-13
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <limits.h>
#include "omp_testsuite.h"
#define INCR 7
#define MY_MAX 200
#define MY_MIN -200
#define NUM_LOOPS 100
#ifndef MY_SCHEDULE
# define MY_SCHEDULE dynamic
#endif
int a, b, a_known_value, b_known_value;
/* Stress the dispatch-buffer system: every thread runs many consecutive
   small nowait worksharing loops, so several loops can be in flight at once.
   Returns 1 when both atomic counters match the precomputed totals. */
int test_kmp_set_disp_num_buffers()
{
  int ok = 1;

  a = 0;
  b = 0;
  /* run many small dynamic loops to stress the dispatch buffer system */
  #pragma omp parallel
  {
    int iter, rep;
    for (rep = 0; rep < NUM_LOOPS; rep++) {
      #pragma omp for schedule(MY_SCHEDULE) nowait
      for (iter = MY_MIN; iter < MY_MAX; iter += INCR) {
        #pragma omp atomic
        a++;
      }
      #pragma omp for schedule(MY_SCHEDULE) nowait
      for (iter = MY_MAX; iter >= MY_MIN; iter -= INCR) {
        #pragma omp atomic
        b++;
      }
    }
  }
  /* detect failure */
  if (a != a_known_value || b != b_known_value) {
    ok = 0;
    printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value,
           b, b_known_value);
  }
  return ok;
}
/* Precompute the expected iteration counts, then run the dispatch-buffer
   stress test REPETITIONS times; the exit code is the number of failures. */
int main(int argc, char** argv)
{
  int iter, rep;
  int failures = 0;

  /* figure out the known values to compare with calculated result */
  a_known_value = 0;
  b_known_value = 0;
  for (rep = 0; rep < NUM_LOOPS; rep++) {
    for (iter = MY_MIN; iter < MY_MAX; iter += INCR)
      a_known_value++;
    for (iter = MY_MAX; iter >= MY_MIN; iter -= INCR)
      b_known_value++;
  }

  for (rep = 0; rep < REPETITIONS; rep++)
    if (!test_kmp_set_disp_num_buffers())
      failures++;

  return failures;
}
|
nested.c | // RUN: %libomp-compile && env OMP_DISPLAY_AFFINITY=true OMP_PLACES=threads OMP_PROC_BIND=spread,close %libomp-run | %python %S/check.py -c 'CHECK' %s
// REQUIRES: affinity && !abt
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// Spawn a 4-thread outer team whose members each spawn a 3-thread inner
// team; with OMP_DISPLAY_AFFINITY=true each team emits the banner below,
// which the CHECK lines after this function pattern-match.
int main(int argc, char** argv) {
  // Banner fields: %L nesting level, %a ancestor thread num, %n thread num,
  // %N team size (values verified by the CHECK ranges below).
  omp_set_affinity_format("TESTER: tl:%L at:%a tn:%n nt:%N");
  omp_set_nested(1); // allow the inner parallel region to fork
  #pragma omp parallel num_threads(4)
  {
    #pragma omp parallel num_threads(3)
    { }
  }
  return 0;
}
// CHECK: num_threads=4 TESTER: tl:1 at:0 tn:[0-3] nt:4
// CHECK: num_threads=3 TESTER: tl:2 at:[0-3] tn:[0-2] nt:3
// CHECK: num_threads=3 TESTER: tl:2 at:[0-3] tn:[0-2] nt:3
// CHECK: num_threads=3 TESTER: tl:2 at:[0-3] tn:[0-2] nt:3
// CHECK: num_threads=3 TESTER: tl:2 at:[0-3] tn:[0-2] nt:3
|
DRB062-matrixvector2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Matrix-vector multiplication: inner level parallelization.
*/
#define N 1000
double a[N][N],v[N],v_out[N];
/* Initialize a[][], v[] and v_out[] with nested parallel loops.  DataRaceBench
   reference code: left verbatim so the benchmark's pragma pattern is
   preserved. */
int init()
{
  int i,j,k; /* k is unused; kept verbatim */
  #pragma omp parallel for private(i ,j )
  for (i = 0; i < N; i++) {
    #pragma omp parallel for private(j )
    for (j = 0; j < N; j++) {
      a[i][j] = i * j;
    }
    /* NOTE(review): j here is the outer loop's private copy, which the inner
       parallel-for (with its own private j) never assigns -- confirm the
       intended value against the original benchmark. */
    v_out[i] = i * j;
    v[i] = i * j;
  }
  return 0;
}
/* Matrix-vector multiply v_out = a * v using nested parallelism: the outer
   loop over rows plus an inner dot product with a sum reduction (this inner
   parallelization is the point of the benchmark). */
void mv()
{
  int i,j;
  #pragma omp parallel for private(i ,j )
  for (i = 0; i < N; i++)
  {
    /* NOTE(review): float accumulator for double data loses precision --
       kept verbatim to preserve the benchmark. */
    float sum = 0.0;
    #pragma omp parallel for private(j ) reduction(+:sum)
    for (j = 0; j < N; j++)
    {
      sum += a[i][j]*v[j];
    }
    v_out[i] = sum;
  }
}
/* Print a[][], v_out[] and v[] row by row.  NOTE(review): printf is used
   without a visible #include <stdio.h> in this file -- relies on an implicit
   declaration; confirm the benchmark's build flags tolerate this. */
int print()
{
  int i,j,k; /* k is unused; kept verbatim */
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      printf("%lf\n", a[i][j]);
    }
    printf("%lf\n",v_out[i]);
    printf("%lf\n",v[i]);
  }
  return 0;
}
/* Driver: initialize the data, run the parallel matrix-vector multiply,
   then print the results. */
int main()
{
  init();
  mv();
  print();
  return 0;
}
|
masterNoBarrier.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Sum a[0..n-1] in parallel: each thread accumulates a local partial sum over
   its static chunk and atomically adds it to the global total.  The barrier
   before the master print is deliberately commented out (file name:
   masterNoBarrier) to demonstrate the resulting ordering hazard. */
int main(int argc, char **argv) {
  /* a[n] is a VLA sized with the initial n==20, before n is re-read below. */
  int i, n=20, tid, a[n], suma=0, sumalocal;
  if (argc < 2) {
    fprintf(stderr,"\nFalta iteraciones\n");
    exit(-1);
  }
  n = atoi(argv[1]);
  if (n>20) n=20; /* clamp so a[20] is never overrun */
  for (i=0; i<n; i++) a[i] = i;
  #pragma omp parallel private(sumalocal,tid)
  {
    sumalocal=0;
    tid=omp_get_thread_num();
    #pragma omp for schedule(static)
    for (i=0; i<n; i++){
      sumalocal += a[i];
      printf(" thread %d suma de a[%d]=%d sumalocal=%d\n", tid, i, a[i], sumalocal);
    }
    #pragma omp atomic
    suma += sumalocal;
    //#pragma omp barrier
    /* NOTE(review): without the barrier above, the master thread may print
       suma before other threads have added their sumalocal -- intentional
       for this demonstration. */
    #pragma omp master
    printf("thread master=%d imprime suma=%d\n", tid, suma);
  }
}
|
markstm.c | //** 2 functions called from i2.c **//
/* Move markers by using simple Runge-Kutta method */
/*
  Advance all markers by one time step, in parallel over markers.  Velocity
  at each marker is interpolated either once (simple scheme, markmod==1) or
  four times (Runge-Kutta).  Handles fluid (water/melt) markers, stress
  rotation, immobile boundary markers that re-enter the grid, and compaction
  of markers that left the grid.
*/
void movemarkomp()
{
  /* Vx, Vy buffer (several of these are unused here; kept verbatim) */
  double dvxdx,dvxdy,dvydx,dvydy,celdx,celdy,vx0,vx1,vx2,vx3,vx4,vy0,vy1,vy2,vy3,vy4,ee0,ee1,ee2,ee3,ee4,sp0,sp1,sp2,sp3,sp4,pr0,pr1,pr2,pr3,pr4;
  /* Water */
  double vxwater,vywater;
  long int mm1,marknum1,m10,m20,m30,m1,m2,m3;
  /* Erosion-Sedimentation Y/N */
  int n1;
  int mm2;
  /* Instability offsets for immobile markers */
  double xnonstab=0.50,ynonstab=0.60;
  double dpdx,dpdy,e,n,vxkoef,vykoef,dx,dy;
  double start;
  /* Hydration front progress */
#if setup>9
  start=omp_get_wtime();
  /* dehydration */
  if(vyfluid!=0 && timesum>1e+11) hydration2omp();
  fprintf(fp_log,"\n Time taken for hydration = %e s \n",omp_get_wtime()-start);
#endif
  /* Save number of markers; new markers appended below start at marknum1 */
  marknum1=marknum;
  /* Surface changes */
#if setup>9
  start=omp_get_wtime();
  if (timestep && erosmod) erosion();
  fprintf(fp_log,"\n Time taken for erosion = %e s \n",omp_get_wtime()-start);
#endif
  /* Move markers */
#pragma omp parallel for shared(markx,marky,markt,markim,markk,markxx,markxy,markd,markv,markp,markexx,markexy,markw,marke,marknum1,follow,nm,ystpy,m10_hr,m11_hr,marknum,outgrid,eroslev,vyfluid,vymelt,GXKOEF,GYKOEF,markmod,timestep,zdeep,tdeep,markht,xsize,ysize,xnumx,ynumy,gx,gy,pr,esp,exx,exy,vx,vy,markcp,markkt,markkf,markkp,markro,markbb,markaa,marknu,markn0,markn1,markll,marka0,marka1,markb0,markb1,markdh,markdv,markss,markmm,marks1,start_cond,stoksmod) \
private(mm1,mm2,m10,m20,m1,m2,m3,n1,vxwater,vywater,e,n,dpdx,dpdy,a,b,vxkoef,vykoef,vx0,vy0,sp0,ee0,pr0,vx1,vy1,sp1,ee1,pr1,vx2,vy2,sp2,ee2,pr2,vx3,vy3,sp3,ee3,pr3,vx4,vy4,sp4,ee4,pr4,dx,dy) \
schedule(runtime)
  for (mm1=0;mm1<marknum;mm1++)
  {
    /* Marker type (types >=100 are immobile variants of type-100) */
    mm2=(int)markt[mm1]; if (mm2>=100) mm2-=100;
    if( ((markx[mm1]>=0 && marky[mm1]>=0 && (markx[mm1])<=xsize && (marky[mm1])<=ysize) || outgrid!=1) && !markim[mm2] )
    {
      // Search marker location within nodal grid
      m10=m1serch(markx[mm1]);
      m20=m2serch(marky[mm1]);
      /**/
      /* Erosion-Sedimentation */
      if((marky[mm1])<=eroslev) n1=1; else n1=0;
      /* Water marker move: types 50..99 carry a fluid velocity addend */
      vxwater=vywater=0;
      if(markt[mm1]>=50 && markt[mm1]<100)
      {
        /* Water velocity */
        vywater=vyfluid; if(markd[mm1]>1100.0) vywater=vymelt;
        /* Fluid in rock */
        if(vyfluid>0 && (markk[mm1]==0 || markk[mm1]>298.0))
        {
          /* Horizontal,Vertical P-cell index */
          m1=m10; if(markx[mm1]>(gx[m1]+gx[m1+1])/2.0) m1+=1;
          if(m1<1) m1=1; if(m1>xnumx-2) m1=xnumx-2;
          m2=m20; if(marky[mm1]>(gy[m2]+gy[m2+1])/2.0) m2+=1;
          if(m2<1) m2=1; if(m2>ynumy-2) m2=ynumy-2;
          /* Pressure gradients (bilinear within the P-cell) */
          e=(markx[mm1]-(gx[m1-1]+gx[m1])/2.0)/((gx[m1+1]-gx[m1-1])/2.0);
          n=(marky[mm1]-(gy[m2-1]+gy[m2])/2.0)/((gy[m2+1]-gy[m2-1])/2.0);
          m3=m1*ynumy+m2;
          dpdx=2.0*((1.0-n)*(pr[m3+ynumy]-pr[m3])+n*(pr[m3+ynumy+1]-pr[m3+1]))/(gx[m1+1]-gx[m1-1]);
          dpdy=2.0*((1.0-e)*(pr[m3+1]-pr[m3])+e*(pr[m3+ynumy+1]-pr[m3+ynumy]))/(gy[m2+1]-gy[m2-1]);
          /* Recalc velocity koefficients (clamped to [-2,2]) */
          vxkoef=(1000.0*GXKOEF-dpdx)/(2300.0*9.81);
          vykoef=(1000.0*GYKOEF-dpdy)/(2300.0*9.81);
          if(vxkoef>2.0) vxkoef=2.0; if(vxkoef<-2.0) vxkoef=-2.0;
          if(vykoef>2.0) vykoef=2.0; if(vykoef<-2.0) vykoef=-2.0;
          /* Recalc velocity */
          vxwater=vywater*vxkoef;
          vywater*=vykoef;
        }
        else
        /* Fluid in water: rise only */
        {
          vxwater=0;
          vywater=-ABSV(vywater);
        }
      }
      /* Motion Calc ///////////////////////////////// */
      /* Vx, Vy, EpsII Simple calc */
      if(markmod==1)
      {
        /* Interpolate velocity, pressure?, EE(eii), and ESP (spin) */
        // These marker values are not stored, they are used in this routine to move each marker in private, thats it.
        allinteriomp(markx[mm1],marky[mm1],m10,m20,&vx0,&vy0,&pr0,&sp0,&ee0);
        vx0+=vxwater; vy0+=vywater;
        /**/
        /* fprintf(fp_log,"SIMPLE %ld %d %e %e %e %e %e",mm1,markt[mm1],markx[mm1],marky[mm1],vx0,vy0,sp0); getchar(); */
      }
      /* Vx, Vy, EpsII 4 Runge-Kutta koef calc */
      else
      {
        allinteriomp(markx[mm1],marky[mm1],m10,m20,&vx1,&vy1,&pr1,&sp1,&ee1);
        vx1+=vxwater; vy1+=vywater;
        /**/
        //fprintf(fltest,"RK4 %ld %d %e %e %e %e %e %e \n",mm1,markt[mm1],markx[mm1],marky[mm1],vx1,vy1,sp1,ee1);
        /**/
        allinteriomp(markx[mm1]+vx1*timestep/2.0,marky[mm1]+vy1*timestep/2.0,m10,m20,&vx2,&vy2,&pr2,&sp2,&ee2);
        vx2+=vxwater; vy2+=vywater;
        /**/
        allinteriomp(markx[mm1]+vx2*timestep/2.0,marky[mm1]+vy2*timestep/2.0,m10,m20,&vx3,&vy3,&pr3,&sp3,&ee3);
        vx3+=vxwater; vy3+=vywater;
        /**/
        allinteriomp(markx[mm1]+vx3*timestep,marky[mm1]+vy3*timestep,m10,m20,&vx4,&vy4,&pr4,&sp4,&ee4);
        vx4+=vxwater; vy4+=vywater;
        /**/
        /* Vx,Vy, EpsXX, EpsYY, EpsXY calc after Runge-Kutta (RK4 weights) */
        vx0=(vx1+2.0*vx2+2.0*vx3+vx4)/6.0;
        vy0=(vy1+2.0*vy2+2.0*vy3+vy4)/6.0;
        if(markmod==2)
        {
          sp0=(sp1+2.0*sp2+2.0*sp3+sp4)/6.0;
          ee0=(ee1+2.0*ee2+2.0*ee3+ee4)/6.0;
        }
        else
        {
          sp0=sp1;
          ee0=ee1;
        }
      }
      /* Orthogonal motion only for markers outside the grid */
      if (outgrid==2)
      {
        if(markx[mm1]<0 || (markx[mm1])>xsize) vy0=0;
        if(marky[mm1]<0 || (marky[mm1])>ysize) vx0=0;
      }
      /* Normal markers */
      if(markt[mm1]<100)
      {
        /* Markers coming from below the model */
        if(marky[mm1]>zdeep && markk[mm1]<tdeep) markk[mm1]=tdeep;
        // If you do not want to apply a large temperature lower boundary condition use:
        //if(marky[mm1]>zdeep && vy0<0 && markk[mm1]<tdeep) markk[mm1]=tdeep;
        /* Normal markers */
        /* X,Y calc after Runge-Kutta */
        markx[mm1]+=(timestep*vx0);
        marky[mm1]+=(timestep*vy0);
        if(marke[mm1]>0)
        {
          marke[mm1]+=(timestep*ee0);
        }
        /* sp0 becomes the rotation angle over this step */
        sp0*=timestep;
        /* Turcotte & Schubert, 1995 rotation formula */
        if(stoksmod==1)
        {
          sp1=markxx[mm1]*cos(sp0)*cos(sp0)-markxx[mm1]*sin(sp0)*sin(sp0)+markxy[mm1]*sin(2.0*sp0);
          sp3=0.5*(-markxx[mm1]-markxx[mm1])*sin(2.0*sp0)+markxy[mm1]*cos(2.0*sp0);
          markxx[mm1]=sp1;
          markxy[mm1]=sp3;
        }
        /* Jaumann corrotation formula */
        if(stoksmod==2)
        {
          sp1=markxx[mm1]+markxy[mm1]*2.0*sp0;
          sp3=markxy[mm1]+0.5*(-markxx[mm1]-markxx[mm1])*2.0*sp0;
          markxx[mm1]=sp1;
          markxy[mm1]=sp3;
        }
        /* Out of grid marker reset */
        if(markx[mm1]<0 || marky[mm1]<0 || (markx[mm1])>xsize || (marky[mm1])>ysize)
        {
          markk[mm1]=0;
          markd[mm1]=-1.0;
          markw[mm1]=-1.0;
          marke[mm1]=0;
        }
      }
      /* Immobile markers */
      else
      {
        /* X,Y calc after Runge-Kutta */
        // Which velocity is used here, if were before located outside of grid ...
        markx[mm1]+=(timestep*vx0);
        marky[mm1]+=(timestep*vy0);
        /* Check new position, add marker at end (marknum1) */
        // Immobile markers that now enter grid spawn a normal marker copy
        if(markx[mm1]>=0 && marky[mm1]>=0 && markx[mm1]<=xsize && marky[mm1]<=ysize)
        {
          /* Serialize appends to the shared marker arrays */
#pragma omp critical(newmark)
          {
#pragma omp flush(marknum1)
            /* Type save */
            markt[marknum1]=markt[mm1]-100;
            /* X,Y calc after Runge-Kutta */
            // Give marker new location (within grid)
            markx[marknum1]=markx[mm1];
            marky[marknum1]=marky[mm1];
            /* Temperature Reset */
            markk[marknum1]=0;
            markd[marknum1]=-1.0;
            markv[marknum1]=0;
            /* Strain Reset */
            marke[marknum1]=0;
            /* Stress Reset */
            markxx[marknum1]=0;
            markxy[marknum1]=0;
            /* Pressure Reset */
            markp[marknum1]=0;
            /* Strain rate Reset */
            markexx[marknum1]=0;
            markexy[marknum1]=0;
            /* Add aditional markers counter */
            marknum1++;
            /* X,Y reset for immobile marker */
            markx[mm1]=markk[mm1];
            marky[mm1]=markv[mm1];
            // If new marker is interesting for picking algorithm, flag to follow
            // Note is hard-coded in i2.c as well. Only here excluded fluid markers, since immobile can not become fluid
            /* NOTE(review): marknum1 was already incremented above, so the
               checks and follow[marknum1]=1 below reference the slot AFTER
               the newly written marker -- looks like an off-by-one; confirm
               against i2.c. */
#if setup>9
            if (start_cond==1 && marky[marknum1]<85e3 && markx[marknum1]>gx[m10_hr] && markx[marknum1]<gx[m11_hr] && markt[marknum1]>1 && markt[marknum1]<50)
            {
              follow[marknum1]=1;
              // #pragma omp flush(nm)
              nm++;
            }
#endif
          }
        }
        /* Check,Reset old position */
        // Use markk and v as dummy from above, so dx and/or dy are 0 if marker is newly added
        dx=markx[mm1]-markk[mm1];
        dy=marky[mm1]-markv[mm1];
        dy=pow(dx*dx+dy*dy,0.5);
        /*
        if(dy>ystpy || (marky[mm1]<0 && vy0<0) || (marky[mm1]>ysize && vy0>0) || (markx[mm1]<0 && vx0<0) || (markx[mm1]>xsize && vx0>0))
        */
        // If moved by more than one cell, reset to old position ?
        if(dy>ystpy)
        {
          /* X,Y reset for immobile marker */
          markx[mm1]=markk[mm1];
          marky[mm1]=markv[mm1];
        }
      }
      /* End Motion Calc ///////////////////////////////// */
    }
  }
  // End omp-section move markers
  /* Mark num: abort if the appended markers overflowed the arrays */
  if(marknum1>MAXMRK) {fprintf(fp_log,"Space out in markx[]"); fflush(fp_log); exit(0);}
  /* Reset additional markers: recycle out-of-grid slots by moving appended
     markers down into them (serial compaction) */
  mm1=0;
  while(marknum1>marknum && mm1<marknum)
  {
    /* Reload marker */
    if((markx[mm1]<0 || marky[mm1]<0 || (markx[mm1])>xsize || (marky[mm1])>ysize) && markt[mm1]<100)
    {
      /* Decrease aditional markers counter */
      marknum1--;
      /* Type save */
      markt[mm1]=markt[marknum1];
      /* Temperature Reset */
      markk[mm1]=0;
      markd[mm1]=-1.0;
      /* Strain Reset */
      marke[mm1]=0;
      /* Stress Reset */
      markxx[mm1]=0;
      markxy[mm1]=0;
      /* Pressure Reset */
      markp[mm1]=0;
      /* Strain rate Reset */
      markexx[mm1]=0;
      markexy[mm1]=0;
      /* X,Y reload */
      markx[mm1]=markx[marknum1];
      marky[mm1]=marky[marknum1];
    }
    /* Increase markers counter */
    mm1++;
  }
  fprintf(fp_log,"\n Number of markers: OLD = %ld NEW = %ld \n",marknum,marknum1);fflush(fp_log);
  /* Set new marker number */
  marknum=marknum1;
  /* Incr cycle of sedimentation */
  sedimnum++;
}
/* End OMP move markers by using Simple/Runge-Kutta method */
/* ro[],nu[] recalc after marker positions */
/* Recompute nodal material properties (ro[], nu[], nd[], gg[], cp[], kt[], tk[], ...)
   by interpolating from Lagrangian markers to the Eulerian grid.
   Steps: (1) per-marker erosion/sedimentation, serpentinization and melting updates;
   (2) OpenMP marker loop accumulating weighted properties into per-thread scratch
   arrays, merged under a critical section; (3) normalization of nodal sums,
   viscosity limiting, water/air density overrides and T boundary conditions.
   Reads/writes many file-scope globals; no parameters, no return value. */
void ronurecalcomp()
{
/* Counters */
long int m1,m2,m3,m10,m20;
int mm2,yn,mm3,n1,n2,ncount=0,nt,tid;
long int mm1;
double dx,dy,swt,swt1,celdx,celdy;
double wro,mnu,mgg,maa,mdro,msxxe,msxye,mexxe,mexye,mro,mcp,mkt,mht,mbb,mdi0,mdi1,mwa,dmwa,mxmelt,mhlatent;
double Mgg,Mro,Mwa,Mcp,Mbb,Maa,Mdhh,Mkt;
// Here in this loop epsin is 2nd invariant of visco-plastic strainrate
double sigin,epsin;
/* TD Database variables, dTK,dPB - TK, PB step for tabulation in TD database */
double H0,H1,H2,H3,R0,R1,R2,R3,G0,G1,G2,G3,W0,W1,W2,W3,dTK=20.0,dPB=1000.0,n,e;
/* Phase transition variables */
double p_pl_out,p_ga_in,rokf,p_sp_in,p_ol_out,p_pv_in,p_sp_out,p_st_in;
/* RO, NU equations var */
double mpb=1.0,mtk=300.0,numax=0,numin=0;
double start,xwall,b1,b2,slope_wall,gelbeg;
start=omp_get_wtime();
Mgg=Mro=Mwa=Mcp=Mbb=Maa=Mdhh=Mkt=0;
if (printmod) fprintf(fp_log,"\n Number of nodes = %ld Number of markers = %ld \n",nodenum,marknum);
fflush(fp_log);
/* Query the number of OpenMP threads once (each thread writes the same value) */
#pragma omp parallel
{nt=omp_get_num_threads();}
/* Layering on sediments */
/* Alternate sediment marker type (3 or 4) with the sedimentation cycle parity */
m1=(long int)(sedimnum/sedimcyc);
m2=((long int)(m1/2))*2;
if(m2==m1) yn=3; else yn=4;
/* ADD MARKERS TO THE v-CELLS ========================== */
/* Clear ro[],nu[] wt */
/* Zero all nodal accumulation arrays; sol0/sol1 hold interpolation weights
   for three staggered-node families (basic, +nodenum, +nodenum2 offsets) */
for (m1=0;m1<nodenum;m1++)
{
ro0[m1]=0;
et0[m1]=0;
nu0[m1]=0;
nd0[m1]=0;
gg0[m1]=0;
gd0[m1]=0;
sxxe0[m1]=0;
sppe0[m1]=0;
sbritn0[m1]=0; // yield stress
sxye0[m1]=0;
exxe0[m1]=0;
exye0[m1]=0;
dro0[m1]=0;
drp0[m1]=0;
cp0[m1]=0;
kt0[m1]=0;
ht0[m1]=0;
tk0[m1]=0;
mrx0[m1]=0;
mry0[m1]=0;
mvx0[m1]=0;
mvy0[m1]=0;
sol0[m1]=0;
sol0[nodenum+m1]=0;
sol0[nodenum2+m1]=0;
sol1[m1]=0;
sol1[nodenum+m1]=0;
sol1[nodenum2+m1]=0;
}
#if setup>9
/* (1) Erosion-sedimentation and melting account for all markers */
#pragma omp parallel for shared(markx,marky,markk,markt,marke,markd,marknum,waterlev,erosmod,deserp,dyserp,xsize,ysize,gx,gy,xnumx,ynumy,ep,pr,timesum,res_high) \
private(mm1,mm2,m3,mtk,mpb,m10,m20,mxmelt,mhlatent) \
firstprivate(yn) \
schedule(runtime)
for (mm1=0;mm1<marknum;mm1++)
{
/* Check markers out of grid */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && markk[mm1]>0 && markt[mm1]<50)
{
/* Up Left Node X,Y Num */
m10=m1serch(markx[mm1]);
m20=m2serch(marky[mm1]);
m3=m10*ynumy+m20;
mm2=(int)markt[mm1];
/* Erosion/sedimentation account */
if(erosmod) erosmarkomp(mm1,yn,m10,markx[mm1],marky[mm1],markt,marke,markd);
/* Water/Air account */
if(markt[mm1]<2)
{
/* Change marker type */
if((marky[mm1])>waterlev) markt[mm1]=1; else markt[mm1]=0;
}
/* P, T parameters calc */
// 1e-5 since convert to bars for melting look-up?
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m10,m20);
mtk=markk[mm1];
// Remove initial weak zone for subduction initiation after X My
if(timesum>(48.6e6*3.15576e+7) && markt[mm1]==12)
{markt[mm1]=9;}
/* Serpentinization of brittle mantle faults at sub-surface */
/* NOTE(review): the condition below repeats markt[mm1]==9 three times -
   presumably additional mantle rock types were intended; verify against
   the rock-type table before changing */
if((markt[mm1]==9 || markt[mm1]==9 || markt[mm1]==9) && marke[mm1]>deserp && marky[mm1]<dyserp)
{
/* Mantle to Antigorite transformation */
markt[mm1]=13;
markd[mm1]=-1.0;
}
/* Mantle to Antigorite transformation */
antigoromp(mtk,mpb,markx[mm1],marky[mm1],mm1,m10,markt);
/* Rocks to rock+melt transformation */
// Note markt passes in address of first element of array to function and allows for modification there
if (meltmod) meltingomp(mtk,mpb,mm1,mm2,markt,marke,&mxmelt,&mhlatent);
}
}
// End OMP section erosion-sedimentation
// Open file for storing marker interface data
if (n0==1)
{
flfric = fopen(fileTxtOutputFric,"w");
fprintf(flfric," rocktype markx marky markpressure sbrit markxx markxy \n");
}
#endif
if (printmod==10000) fprintf(fp_log,"\n Time taken for erosmark/antigor/melting in ronurecalc = %e s \n",omp_get_wtime()-start);
start=omp_get_wtime();
/* (2) Add ro[] nu[] etc. using selected markers */
#pragma omp parallel shared(marknum,gridmod,markx,marky,markk,markt,erosmod,sedilev,markw,markd,eroslev, \
waterlev,zdeep,densimod,markht,marke,markexx,markexy,markxx,markxy,markp, \
nodenum,nodenum2,markvx,markvy,markv,markwa,markim,xsize,ysize,gx,gy,xnumx,ynumy,ep,pr,vx,vy,markf0,markf1,markbb,markaa,markro,markgg, \
markn0,markn1,marks0,marks1,marknu,markcp,markkt,markkf,markkp,nubeg,nuend,strmin,strmax,\
exy,esp,exx,pbmin,pbstp,pbnum,tkmin,tkstp,tknum,pbmin1,pbstp1,pbnum1,tkmin1,tkstp1,tknum1,timesum,td, \
zmpor,tkpor,markll,hidrl,hidry,lambfld,marka0,markb0,marke1,marka1,markb1,marke0,msbrit,msii_old, \
tk_updipsez0,tk_updipsez1,mgamma_vw,mgamma_vs,mvc_vs,mvc_vw,mus_vs,markdh,markdv,markss,markmm,timestepe,cyc0max,start_cond,veldepfric,stoksmod,res_high) \
private(mm1,mm2,tid,m10,m20,m1,m3,mpb,mtk,mro,mbb,maa,mcp,mkt,mht,mnu,mgg,mxmelt,mhlatent,mwa,wro,dmwa,mdi0,mdi1,mdro, \
celdx,celdy,swt,swt1,dx,dy,msxxe,msxye,mexxe,mexye,sigin,epsin,Mgg,Mro,Mwa,Mcp,Mbb,Maa,Mdhh,Mkt,p_pl_out,p_ga_in,rokf,p_sp_in,p_ol_out,p_pv_in,p_sp_out,p_st_in) \
firstprivate(yn)
{
// Initialize temporarily interpolation arrays (capitilized) to zero (inside pragma, so is private)
// Each thread accumulates into its own zeroed copies; merged later under 'critical'
double* Nu0 = (double*) calloc(nodenum,sizeof(double));
double* Nd0 = (double*) calloc(nodenum,sizeof(double));
double* Gg0 = (double*) calloc(nodenum,sizeof(double));
double* Gd0 = (double*) calloc(nodenum,sizeof(double));
double* Ro0 = (double*) calloc(nodenum,sizeof(double));
double* Sxxe0 = (double*) calloc(nodenum,sizeof(double));
double* Sppe0 = (double*) calloc(nodenum,sizeof(double));
double* Sbritn0 = (double*) calloc(nodenum,sizeof(double));
double* Sxye0 = (double*) calloc(nodenum,sizeof(double));
double* Exxe0 = (double*) calloc(nodenum,sizeof(double));
double* Exye0 = (double*) calloc(nodenum,sizeof(double));
double* Et0 = (double*) calloc(nodenum,sizeof(double));
double* Dro0 = (double*) calloc(nodenum,sizeof(double));
double* Drp0 = (double*) calloc(nodenum,sizeof(double));
double* Cp0 = (double*) calloc(nodenum,sizeof(double));
double* Kt0 = (double*) calloc(nodenum,sizeof(double));
double* Ht0 = (double*) calloc(nodenum,sizeof(double));
double* Tk = (double*) calloc(nodenum,sizeof(double));
double* Mrx0 = (double*) calloc(nodenum,sizeof(double));
double* Mry0 = (double*) calloc(nodenum,sizeof(double));
double* Mvx0 = (double*) calloc(nodenum,sizeof(double));
double* Mvy0 = (double*) calloc(nodenum,sizeof(double));
double* Sol0 = (double*) calloc(nodenum*3,sizeof(double));
double* Sol1 = (double*) calloc(nodenum*3,sizeof(double));
#pragma omp for \
schedule(runtime)
for (mm1=0;mm1<marknum;mm1+=gridmod)
{
/* Check markers out of grid */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && markk[mm1]>0 && markt[mm1]<50)
{
tid=omp_get_thread_num();
m10=m1serch(markx[mm1]);
m20=m2serch(marky[mm1]);
/* Marker type */
mm2=(int)markt[mm1]; if (mm2>=100) mm2-=100;
#if setup>9
/* 1a. --- Remove water, rocks --- */
if(erosmod==0)
{
if(marky[mm1]>sedilev && mm2<2)
{
mm2=yn; markt[mm1]=yn;
markw[mm1]=0;
markd[mm1]=-1.0;
}
if(marky[mm1]<eroslev && mm2>1)
{
if((marky[mm1])>waterlev) markt[mm1]=1; else markt[mm1]=0;
mm2=markt[mm1];
markw[mm1]=0;
markd[mm1]=-1.0;
}
}
/* 1b. Remove Plumes */
if(marky[mm1]>zdeep && mm2!=10)
{
mm2=10; markt[mm1]=10;
markw[mm1]=0;
markd[mm1]=-1.0;
}
#endif
/* P, T parameters calc */
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m10,m20);
mtk=(markk[mm1]);
/* Reset water/air temperature */
// if (mm2<2) mtk=markk[mm1]=273.0;
/* 2.-3. --- Calculate density --- */
// & just points to address of normally defined variables at start of this routine; more efficient for passing! this way variable here can be changed inside subroutine
#if setup>9
dencalcomp(mtk,mpb,markx[mm1],marky[mm1],mm2,&mro,&mbb,&maa);
mcp=markcp[mm2];
mkt=(markkt[mm2]+markkf[mm2]/(mtk+77.0))*exp(markkp[mm2]*mpb);
/* ========================= */
/* Mantle phase transitions */
/* ========================= */
if (densimod==1)
{
/*
if(mm2>=9 && mm2<=14 && markex[mm1]>0) mro*=1.0-0.04*markex[mm1];
*/
/* Eclogitization, St, Pv transitions in oceanic crust */
if(mm2==7 || mm2==8 )
{
/* Eclogitization Ito and Kennedy, 1971 */
/*basalt=>garnet granulite (Ga-In) transition*/
p_ga_in=-9222.0+mtk*14.0;
/*Not to have granulites at pressure lower than 2 kbar*/
if(p_ga_in<2000.0) p_ga_in=2000.0;
/*garnet granulite=>eclogite (Pl-Out) transition*/
p_pl_out=-1460.0+mtk*20.0;
/*Not to have eclogites at pressure lower than 12 kbar*/
if(p_pl_out<12000.0) p_pl_out=12000.0;
if(mpb>p_ga_in)
{
rokf=0;
if(mtk>teclmin)
{
if(mtk>teclmax)
{
rokf=0.16;
}
else
{
rokf=0.16*(mtk-teclmin)/(teclmax-teclmin);
}
}
if(mpb>=p_pl_out)
{
mro*=1.0+rokf;
}
else
{
/* Linear ramp of density increase between Ga-in and Pl-out boundaries */
mro*=(1.0+rokf*(mpb-p_ga_in)/(p_pl_out-p_ga_in));
}
}
/* Coe->St transition Gerya et al., 2004, PCM */
p_st_in=59100.0+mtk*22.6;
if(mpb>p_st_in) mro*=1.06;
/* Pv transition, Mishin et al., 2008 with slope from Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=354000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=352000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.08;
if(mpb>=p_sp_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
/* Ol-Sp and Pv transitions in the mantle */
if(mm2>=9 && mm2<=14)
{
/* Ol-Sp transition, Katsura & Ito, 1989 */
/* Ol-out transition*/
p_ol_out=91000.0+mtk*27.0;
/* Sp-in transition*/
p_sp_in=66000.0+mtk*39.0;
/*Limit width of Sp-Ol transition to 2 kbar */
if(p_sp_in>p_ol_out-2000.0) p_sp_in=p_ol_out-2000.0;
if(mpb>p_sp_in)
{
rokf=0.06;
if(mpb>=p_ol_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_sp_in)/(p_ol_out-p_sp_in));
}
}
/* Pv transition, Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=304000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=302000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.11;
if(mpb>=p_sp_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
}
/* ========================== end */
/* Test Heat conductivity k=ko/(1+b*(T-To)/To) */
if (markkt[mm2]<0) mkt=-markkt[mm2]/(1.0+markkf[mm2]*(mtk-markkp[mm2])/markkp[mm2]);
mht=markht[mm2];
/* 4. Molten rocks */
// Note: Calls viscalc only for melted rocks here !
if (mm2>20) { meltpartomp(mtk,mpb,markx[mm1],marky[mm1],mm1,mm2,&mro,&mbb,&maa,&mnu,&mcp,&mkt,&mgg,&mxmelt,&mhlatent); }
/* ~X Thermodynamic database use for water */
// ATAT QUESTION TARAS: I would suggest to remove this if-statement, since it is harldy use. Do you agree? To avoid what was it included?
/* Density && Water wt% save */
// Hardly used; something that is larger than air or water in rock type number, but has negative density, at very start of model..
if ( densimod==3 && mm2>1 && (timesum<=1e+11 || markd[mm1]<=0) )
{
// tdbasecalc(mtk,mpb,mm2,mm1);
// markw[mm1]=eps[42];
// markd[mm1]=mro;
// ATATOMP QUESTION TARAS: the above form of mro means that it comes from dencalcomp or meltpartomp, and not from tdbasecalc above that stored is as eps.. ; what you wanted ?
// Use capital letters since Taras does not always assign eps value into this loop for m.. (eg mkt in next densimod2 call few lines below)
// ATATOMP QUESTION TARAS: Is this intentionally? With what purpose ? eg for mkt in next densimod2 call few lines below
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m10,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
markw[mm1]=Mwa;
markd[mm1]=mro;
}
#elif setup<10
// In lab have constant density per rocktype and no thermal evolution, so much faster
mro=markro[mm2];
#endif
/* ---> (3) Marker rheology: calculate viscosity and stresses <--- */
mdi0=0;
mdi1=1.0;
#if setup>9
if(mm2<=20)
#endif
{
// yn = 1 now means that plasticity IS executed in this routine call to viscalc! This is the only successfull call within current code for normal, non-melting rocks !
viscalcomp(mtk,mpb,markx[mm1],marky[mm1],markv[mm1],markwa[mm1],markk[mm1],markp[mm1],markt[mm1],markexx[mm1],markexy[mm1],markxx,markxy,marke,mm1,mm2,1,m10,&mnu,&mdi0);
/* XXX Density correction for the dilation angle XXX = not executed */
if(markf0[mm2]>0 && markf1[mm2]>0 && marke[mm1]>0)
{
/* Second invariant of viscoplastic strain calc, check */
sigin=pow(markxx[mm1]*markxx[mm1]+markxy[mm1]*markxy[mm1],0.5);
epsin=marke[mm1]-sigin/2.0/markgg[mm2];
if(epsin>markf1[mm2]) epsin=markf1[mm2];
if(epsin>0) mdi1=exp(-2.0*epsin*markf0[mm2]);
}
}
msxxe=markxx[mm1];
msxye=markxy[mm1];
mexxe=markexx[mm1];
mexye=markexy[mm1];
mgg=markgg[mm2];
/* Min,Max NU limitation */
if(mnu<nubeg) mnu=nubeg; if(mnu>nuend) mnu=nuend;
/* Water/Air account */
#if setup>9
if(mm2<2)
{
markd[mm1]=mro;
mdi0=0;
mdi1=1.0;
}
#endif
/* End Water/Air account */
/* Calc log density derivative, save new density */
if(markd[mm1]<=0 || densimod==0)
{markd[mm1]=mro; mdi0=0;}
mdro=0;
maa=0;
mdi1=1.0;
#if setup>9
if(timestepe)
{
mdro=mro/markd[mm1];
mdro=log(mdro)-mdi0;
//if(epsin>0 && debugmod) {fprintf(fp_log,"d %ld %d %e %e %e %e %e %e %e %e %e %e",mm1,mm2,markx[mm1],marky[mm1],marke[mm1]*2.0*markgg[mm2],sigin,marke[mm1],epsin,-2.0*epsin*markf0[mm2],markd[mm1],mro,mdro);getchar();}
}
#endif
/* Save new density */
/* NOTE(review): this assignment overwrites the mdro computed in the
   timestepe branch above - confirm that is intentional */
mdro=-mdi0;
markd[mm1]=mro;
/* Correct new density for dilation */
mro*=mdi1;
/* Saving marker viscosity */
markv[mm1]=mnu;
// if(debugmod) {fprintf(fp_log,"num=%ld type=%d x=%e y=%e mpb=%e mtk=%e nu=%e ro=%e cp=%e kt=%e ht=%e",mm1,mm2,markx[mm1],marky[mm1],mpb,mtk,mnu,mro,mcp,mkt,mht);getchar()};
/* --> (4) Interpolation from markers to 4 corners of the cell ====================================*/
/* Marker weight calculation using dimension of current Cell */
celdx=gx[m10+1]-gx[m10];
celdy=gy[m20+1]-gy[m20];
swt1=1.0/celdx/celdy;
/* Marker weights calculation using dimension of current Cell */
celdx=(markx[mm1]-gx[m10])/(gx[m10+1]-gx[m10]);
celdy=(marky[mm1]-gy[m20])/(gy[m20+1]-gy[m20]);
if (celdx<0 || celdy<0 || celdx>1.0 ||celdy>1.0) {fprintf(fp_log," WARNING !!! num=%ld type=%d x=%e y=%e celdx=%e celdy=%e",mm1,mm2,markx[mm1],marky[mm1],celdx,celdy); fflush(fp_log); getchar();}
/* --- Interpolate ro,nu etc to nodes using interpolation coefficients --- */
for (m1=0;m1<4;m1++)
{
/* Marker weight calculation using dimension of current Cell */
/* Different corners */
/* 0 2 */
/* 1 3 */
switch(m1)
{
case 0:
/* Calc node number */
m3=m10*ynumy+m20;
/* Add shear viscosity Nu */
if (celdx<0.5 && celdy<0.5)
{
dx=1.0-2.0*celdx;
dy=1.0-2.0*celdy;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
/* Add Vx and Mx from markers */
if (celdx<0.5)
{
dx=1.0-celdx;
dy=1.0-ABSV(celdy-0.5);
swt=swt1*dx*dy;
Mvx0[m3]+=markvx[mm1]*mro*swt;
Mrx0[m3]+=mro*swt;
Sol0[nodenum2+m3]+=swt;
}
/* Add Vy and My from markers */
if (celdy<0.5)
{
dx=1.0-ABSV(celdx-0.5);
dy=1.0-celdy;
swt=swt1*dx*dy;
Mvy0[m3]+=markvy[mm1]*mro*swt;
Mry0[m3]+=mro*swt;
Sol1[nodenum2+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*(1.0-celdx)*(1.0-celdy);
break;
case 1:
/* Calc node number */
m3=m10*ynumy+m20+1;
/* Add shear viscosity Nu */
if (celdx<0.5 && celdy>0.5)
{
dx=1.0-2.0*celdx;
dy=2.0*celdy-1.0;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
/* Add Vy and My from markers */
if (celdy>0.5)
{
dx=1.0-ABSV(celdx-0.5);
dy=celdy;
swt=swt1*dx*dy;
Mvy0[m3]+=markvy[mm1]*mro*swt;
Mry0[m3]+=mro*swt;
Sol1[nodenum2+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*(1.0-celdx)*celdy;
break;
case 2:
/* Calc node number */
m3=(m10+1)*ynumy+m20;
/* Add shear viscosity Nu, Sxy */
if (celdx>0.5 && celdy<0.5)
{
dx=2.0*celdx-1.0;
dy=1.0-2.0*celdy;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
/* Add Vx and Mx from markers */
if (celdx>0.5)
{
dx=celdx;
dy=1.0-ABSV(celdy-0.5);
swt=swt1*dx*dy;
Mvx0[m3]+=markvx[mm1]*mro*swt;
Mrx0[m3]+=mro*swt;
Sol0[nodenum2+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*celdx*(1.0-celdy);
break;
case 3:
/* Calc node number */
m3=(m10+1)*ynumy+m20+1;
/* Add shear viscosity Nu */
if (celdx>0.5 && celdy>0.5)
{
dx=2.0*celdx-1.0;
dy=2.0*celdy-1.0;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
// Add values to central node once
// Brackets determine the scope of the to here limited variables, but currently make no difference
{
dx=1.0-2.0*ABSV(celdx-0.5);
dy=1.0-2.0*ABSV(celdy-0.5);
swt=swt1*dx*dy;
Nd0[m3]+=mnu*swt;
Gd0[m3]+=mnu/mgg*swt;
Sxxe0[m3]+=msxxe*swt;
Sppe0[m3]+=markp[mm1]*swt;
Sbritn0[m3]+=msbrit[mm1]*swt; // Yield stress in the pressure node
Exxe0[m3]+=mexxe*swt;
Dro0[m3]+=mdro*swt;
Drp0[m3]+=maa*swt;
Sol1[nodenum+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*celdx*celdy;
break;
}
// End switch of weight calculation
/* Add Physical Properties: ro,nu, etc. */
// fprintf(fp_log,"num=%ld type=%d x=%e y=%e cell=%ld dx=%e dy=%e swt=%e",mm1,mm2,markx[mm1],marky[mm1],m3,dx,dy,swt);getchar();
// nu0[m3]+=mnu*swt;
// ATAT TARAS Why use mcp*MRO? in routines calculate purely from markcp... not done in manuele's, later in heat mrocp, but unrelated
Ro0[m3]+=mro*swt;
Et0[m3]+=mbb*swt;
Cp0[m3]+=mcp*mro*swt;
Kt0[m3]+=mkt*swt;
Ht0[m3]+=mht*swt;
Sol0[m3]+=swt;
/* Add T */
if(!markim[mm2])
{
Tk[m3]+=mtk*swt;
Sol1[m3]+=swt;
}
}
/* End Interpolation from markers to nodes ====================================*/
}
}
// Add interpolation arrays from different processors and free their memory
#pragma omp critical (sumsolarrays)
{
for (m3=0;m3<nodenum;m3++)
{
nu0[m3]+=Nu0[m3];
nd0[m3]+=Nd0[m3];
gd0[m3]+=Gd0[m3];
gg0[m3]+=Gg0[m3];
ro0[m3]+=Ro0[m3];
cp0[m3]+=Cp0[m3];
kt0[m3]+=Kt0[m3];
ht0[m3]+=Ht0[m3];
dro0[m3]+=Dro0[m3];
drp0[m3]+=Drp0[m3];
sxxe0[m3]+=Sxxe0[m3];
sxye0[m3]+=Sxye0[m3];
sppe0[m3]+=Sppe0[m3];
sbritn0[m3]+=Sbritn0[m3];
exxe0[m3]+=Exxe0[m3];
exye0[m3]+=Exye0[m3];
et0[m3]+=Et0[m3];
tk0[m3]+=Tk[m3];
mrx0[m3]+=Mrx0[m3];
mry0[m3]+=Mry0[m3];
mvx0[m3]+=Mvx0[m3];
mvy0[m3]+=Mvy0[m3];
sol0[m3]+=Sol0[m3];
sol1[m3]+=Sol1[m3];
sol0[nodenum+m3]+=Sol0[nodenum+m3];
sol1[nodenum+m3]+=Sol1[nodenum+m3];
sol0[nodenum2+m3]+=Sol0[nodenum2+m3];
sol1[nodenum2+m3]+=Sol1[nodenum2+m3];
}
}
// Free dynamically allocated interpolation arrays
free(Nu0);
free(Nd0);
free(Gg0);
free(Gd0);
free(Ro0);
free(Sxxe0);
free(Sppe0);
free(Sbritn0);
free(Sxye0);
free(Exxe0);
free(Exye0);
free(Et0);
free(Dro0);
free(Drp0);
free(Cp0);
free(Kt0);
free(Ht0);
free(Tk);
free(Mrx0);
free(Mry0);
free(Mvx0);
free(Mvy0);
free(Sol0);
free(Sol1);
}
// End OMP section marker to node interpolation
#if setup>9
if (n0==1){fclose(flfric);}
#endif
if (printmod==10000) fprintf(fp_log,"\n Time taken for rho and vis calc + M->N1 in ronurecalc = %e s \n",omp_get_wtime()-start);
start=omp_get_wtime();
/* Recalculate ro[] nu[] */
/* (3) Normalize the weighted nodal sums by their accumulated weights */
for (m1=0;m1<xnumx;m1++)
{
for (m2=0;m2<ynumy;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
/* Shear viscosity recalc check */
if(sol0[nodenum+m3])
{
// Boundary Condition Viscosity (set in mu)
if(mu[m3] && (timesum<timebond || m1<=2 || m2<=2 || m1>=xnumx-4 || m2>=ynumy-3))
{
// BC value defined in init.t3c
if(mu[m3]>0)
{
nu0[m3]=mu[m3];
}
else
{
/* Negative mu is interpreted as an upper viscosity cap */
nu0[m3]/=sol0[nodenum+m3];
if(nu0[m3]>-mu[m3]) nu0[m3]=-mu[m3];
}
}
// Rest; solution
else
{
nu0[m3]/=sol0[nodenum+m3];
}
/* Min,Max NU limitation */
if(nu0[m3]<nubeg) nu0[m3]=nubeg; if(nu0[m3]>nuend) nu0[m3]=nuend;
/* Min,Max NU definition for nu contrast limit */
if(numin==0 || nu0[m3]<numin) numin=nu0[m3]; if(numax==0 || nu0[m3]>numax) numax=nu0[m3];
nu[m3]=nu0[m3];
/* Elastic shear stress Sxy recalc */
sxye[m3]=sxye0[m3]/sol0[nodenum+m3];
exye[m3]=exye0[m3]/sol0[nodenum+m3];
/* Shear shear modulus recalc */
gg[m3]=nu[m3]/(gg0[m3]/sol0[nodenum+m3]);
/* Reset weight */
sol0[nodenum+m3]=0;
}
/* Normal viscosity recalc check */
if(sol1[nodenum+m3])
{
if(mu[m3] && (timesum<timebond || m1<=2 || m2<=2 || m1>=xnumx-4 || m2>=ynumy-3))
{
if(mu[m3]>0)
{
nd0[m3]=mu[m3];
}
else
{
nd0[m3]/=sol1[nodenum+m3];
if(nd0[m3]>-mu[m3]) nd0[m3]=-mu[m3];
}
}
else
{
nd0[m3]/=sol1[nodenum+m3];
}
/* Min,Max NU limitation */
if(nd0[m3]<nubeg) nd0[m3]=nubeg; if(nd0[m3]>nuend) nd0[m3]=nuend;
/* Min,Max NU definition for nu contrast limit */
if(numin==0 || nd0[m3]<numin) numin=nd0[m3]; if(numax==0 || nd0[m3]>numax) numax=nd0[m3];
nd[m3]=nd0[m3];
/* Elastic Normal stress recalc */
sxxe[m3]=sxxe0[m3]/sol1[nodenum+m3];
sppe[m3]=sppe0[m3]/sol1[nodenum+m3];
sbritn[m3]=sbritn0[m3]/sol1[nodenum+m3];
exxe[m3]=exxe0[m3]/sol1[nodenum+m3];
/* Density changes recalc */
dro[m3]=dro0[m3]/sol1[nodenum+m3];
drp[m3]=drp0[m3]/sol1[nodenum+m3];
/* Normal shear modulus recalc */
gd[m3]=nd[m3]/(gd0[m3]/sol1[nodenum+m3]);
/* Reset weight */
sol1[nodenum+m3]=0;
}
/* Vx Mx recalc check */
if(sol0[nodenum2+m3])
{
/* Material constants recalc */
mvx[m3]=mvx0[m3]/mrx0[m3];
mrx[m3]=mrx0[m3]/sol0[nodenum2+m3];
sol0[nodenum2+m3]=0;
}
/* Vy My recalc check */
if(sol1[nodenum2+m3])
{
/* Material constants recalc */
mvy[m3]=mvy0[m3]/mry0[m3];
mry[m3]=mry0[m3]/sol1[nodenum2+m3];
sol1[nodenum2+m3]=0;
}
/* Other variables recalc check */
if(sol0[m3])
{
/* Material constants recalc */
ro[m3]=ro0[m3]/sol0[m3];
#if setup>9
if(gy[m2]<waterlev && ro[m3]<1000.1) ro[m3]=1.0;
if(gy[m2]>=waterlev && ro[m3]<1000.1) ro[m3]=1000.0;
#endif
et[m3]=et0[m3]/sol0[m3];
cp[m3]=(cp0[m3]/sol0[m3])/ro[m3];
kt[m3]=kt0[m3]/sol0[m3];
ht[m3]=ht0[m3]/sol0[m3];
/* Advective addition for T K in nodes recalc */
if (sol1[m3])
{
tk[m3]=tk0[m3]/sol1[m3];
sol1[m3]=0;
}
/* Reset weight */
sol0[m3]=0;
}
}
}
if (printmod) fprintf(fp_log,"Min, Max viscosity %e %e \n",numin,numax); fflush(fp_log);
/* Reset advective temperature */
for (m3=0;m3<nodenum;m3++) {tk3[m3]=0;}
/* Check Upper/Lower limits for nu[] after given contrast */
if(nucontr>1.0 && numin>0) numax=numin*nucontr;
if(nucontr<1.0 && numax>0) numin=numax*nucontr;
for (m3=0;m3<nodenum;m3++)
{
if(nu[m3]<numin) nu[m3]=numin; if(nu[m3]>numax) nu[m3]=numax;
if(nd[m3]<numin) nd[m3]=numin; if(nd[m3]>numax) nd[m3]=numax;
}
/* Water/air density */
#if setup>9
for (m1=0;m1<xnumx;m1++)
for (m2=0;m2<ynumy;m2++)
{
m3=m1*ynumy+m2;
if(gy[m2]<waterlev && ro[m3]<1000.1) ro[m3]=1.0;
if(gy[m2]>=waterlev && ro[m3]<1000.1) ro[m3]=1000.0;
}
#endif
/* ---> 5. Set Boundary conditions for T <---*/
if (printmod) fprintf(fp_log,"\n AVERAGE TEMPERATURE CORRECTION FOR BOUNDARY CONDITIONS ...\n"); fflush(fp_log);
tkrecalc();
if (printmod) fprintf(fp_log,"AVERAGE TEMPERATURE OK!\n"); fflush(fp_log);
/* Adiabate computing */
/* NOTE(review): 1==0 permanently disables this branch - dead code kept
   deliberately? confirm before removal */
if(1==0 && timesum<3.15576e+7*1e+3)
{
/* Lower boundary TK - Node Cycle */
for (m1=0;m1<xnumx;m1++)
{
/* Cur Line Num in bondm[] */
m2=(m1+1)*ynumy-1;
m3=bondm[m2+nodenum3];
if(m3)
{bondv[m3][0]=tk[m2-1]*2.0-tk[m2-2];}
}
}
if (printmod==10000) fprintf(fp_log,"\n Time taken for M->N2 in ronurecalc = %e s \n",omp_get_wtime()-start);
/* END ADD MARKERS TO THE v-CELLS ========================== */
}
/* End ro[],nu[] recalc after marker positions - routine */
/* Calc density for given P,T */
/* Compute equation-of-state density and its T/P derivatives for one marker.
   Outputs via pointers: *mro density (kg/m3), *mbb thermal-expansion term,
   *maa compressibility term. Parameters x,y are part of the call signature
   but unused here. */
void dencalcomp(double mtk, double mpb, double x, double y, int mm2, double *mro, double *mbb, double *maa)
/* mtk - T, K */
/* mpb - P, bar */
/* x,y - XY location of point for Vx,Vy calc */
/* mm2 - Rock number */
{
/* Thermal factor: 1 - bro*(TK-298.15) */
double tfac=1.0-markbb[mm2]*(mtk-298.15);
/* Pressure factor: 1 + aro*(Pkbar-0.001) */
double pfac=1.0+markaa[mm2]*(mpb-1.0)*1e-3;
/* Ro=ro0*(1-bro*(TK-298.15))*(1+aro*(Pkbar-0.001)); constant ro0 when densimod==0 */
if (densimod==0)
{
*mro=markro[mm2];
}
else
{
*mro=markro[mm2]*tfac*pfac;
}
/* Adiabatic term: al=bro/(1-bro*(Tk-298.15)) */
*mbb=markbb[mm2]/tfac;
/* Compressibility: be=aro/(1+aro*(Pkbar-0.0001) */
*maa=1.e-8*markaa[mm2]/pfac;
}
/* End OMP Calc density for given P,T */
/* OMP Antigorite weakening of mantle */
/* OMP Antigorite weakening of mantle */
/* Toggle a mantle marker between hydrated (11) and serpentinized (13) states
   depending on whether its T,K lies inside the antigorite stability field
   (Schmidt and Poli, 1998), evaluated at depth below the local erosion surface.
   Mutates markt[mm1] only; markers of any other type are left untouched. */
void antigoromp(double mtk, double mpb, double x, double y, long int mm1, long int m10, char markt[])
/* mtk - T, K */
/* mpb - P, bar (unused here, kept for call-signature uniformity) */
/* x,y - XY location of point for Vx,Vy calc */
/* mm1 - mark number */
/* m10 - Up Left Node X,Y Num */
{
/* Val buffer (unused locals hydry,yfiltr,hydryl,tsubd,vxs,vys removed) */
double k1,sy1,e;
/* Check marker type: only hydrated (11) / serpentinized (13) mantle reacts */
if(markt[mm1]!=11 && markt[mm1]!=13) return;
/* Relativ Normalized coord Calc */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
/* Erosion surface; oceanic crust top (linear interpolation along x) */
sy1=(e*ep[m10+1]+(1.0-e)*ep[m10]);
/* Antigorite weakening of mantle above oceanic crust */
/* Atg stability field after Schmidt and Poli, 1998: k1 is the boundary
   temperature (K) as a quadratic in depth below the erosion surface,
   with separate fits above/below 63 km */
if((y-sy1)>63000.0)
{
k1=1013.17699-0.060387633e-3*(y-sy1)-0.004289442e-6*(y-sy1)*(y-sy1);
}
else
{
k1=751.490422+6.00773668e-3*(y-sy1)-0.034690759e-6*(y-sy1)*(y-sy1);
}
/* Change marker Type */
/* Serpentinized (13) - to hydrated (11): too hot for antigorite */
if(k1<=mtk && markt[mm1]==13) markt[mm1]=11;
/* Hydrated(11) - to serpentinized (13): inside antigorite stability field */
if(k1>mtk && markt[mm1]==11) markt[mm1]=13;
}
/* OMP End Antigorite weakening of mantle */
/* Nu calc after reological equation */
// Uses timestepe or computational visco-elastic timestep
/* P-T-stress dependent rheology without/with brittle/ductile transition */
/* Reological equations */
/* Stress>SScr */
/* Power law dislocation creep: SSii={NU0*EEii*exp[(E+PV)/RT]}^(1/n) */
/* Effective viscosity: NU=1/2*{NU0*exp[(E+PV)/RT]}^(1/n)*EEii^[(1-n)/n] */
/* Stress<SScr */
/* Newtonian diffusion creep: SSii=NU1*EEii*exp[(E+PV)/RT] */
/* Effective viscosity: NU=NU0/2*exp[(E+PV)/RT] */
/* NU1=NU0/SScr^(n-1) */
/* SScr - dislocation, diffusion transition stress */
/* SSii - second invariant of deviatoric stress tensor */
/* EEii - epsin - second invariant of strain rate tensor */
/* E - activation energy, J */
/* V - activation volume, J/bar */
/* R - gas constant 8.314 J/K */
/* Viscosity NU calc after reological equations */
/* NU=SSii/(2*EEii) */
/* Brittle - Ductile transition */
/* sbrit=MINV(0.85e+5*pb,60e+6+0.6e+5*pb)*lambda; (Schott & Schmeling, 1998) */
/* sbrit=MINV(0.667e+5*pb,51.2e+6+0.512e+5*pb)*lambda; (Brace & Kohlsstedt, 1980) */
void viscalcomp(double mtk, double mpb, double cmx, double cmy, double Markv, double Markwa, double Markk, double Markp, double Markt, double Markexx, double Markexy, double Markxx[], double Markxy[], double Marke[], long int mm1, int mm2, int yn, long int m10,double *mnu, double *mdi0)
/* mtk - T, K */
/* mpb - P, bar */
/* cmx,cmy - XY location of point for Vx,Vy calc */
/* mm1 - Marker number */
/* mm2 - rock type */
/* yn - plastic reset yes(1)/no(0) - switch from version 1 to 2 ! */
// bbrit_cor - slip velocity dependent correction for friction coefficient
{
/* Val buffer */
double xnu,nnu,e,n,rt=8.314*mtk,k1,e1,epsin,sigin,sduct,sbrit,nueff,strain,abrit,bbrit,nubrit,nunewt,nupowl,nuduct;
/* Reological Eq par */
double sy1,lamb,xelvis,sxxnew,sxynew,siginnew,mnu0,mnu1,mnu2,siginnew0,siginnew1,siginnew2,dsiginnew0,dsiginnew1,dsiginnew2;
/* Counters */
long int m1;
int ncount=0;
// Slip velocity dependent friction
double relvw=60,dvw,dvs;
/* Melted rocks */
// But incoming mm2 is already mm2_actual-20
#if setup>9
if (mm2>20) { *mnu = markn0[mm2]; return; }
#endif
/* Non-melted rocks, mm2 <= 20 */
/* Calc effective strain rate, stress after second strain rate Tenzor invariant EEii=(1/2SUM(EPSik^2))^(1/2) */
// Interpolation to these markers done at end of viterate() of previous timestep
epsin=pow(Markexx*Markexx+Markexy*Markexy,0.5);
sigin=pow(Markxx[mm1]*Markxx[mm1]+Markxy[mm1]*Markxy[mm1],0.5);
/* --- 1. Calculate components of brittle strength; cohesion, friction, and hydrostatic pore pressure weakening factor --- */
// - Lambda brittle weakening factor for hydrostatic pore pressure -
/* Up Left Node X,Y Num */
m1=m10;
/* Relative normalized coord calc */
e=(cmx-gx[m1])/(gx[m1+1]-gx[m1]);
n=(e*ep[m1+1]+(1.0-e)*ep[m1]);
// Pore fluid pressure correction: lamb = Pf/Ps
lamb=markll[mm2];
#if setup>9
// Predefine fluid pressures near surface
if ((cmy-n)<=0) lamb=hidrl;
if ((cmy-n)>0 && (cmy-n)<hidry) lamb=hidrl*(1.0-(cmy-n)/hidry)+lamb*(cmy-n)/hidry;
// Lower friction in fluid/melt present areas
if (Markwa==1) {lamb=lambfld;}
#endif
/* - Strain weakening - */
strain=Marke[mm1];
/* A,B coefficients calc depending on integral strain */
abrit=marka0[mm2];
bbrit=markb0[mm2];
if(strain>marke1[mm2])
{
abrit=marka1[mm2];
bbrit=markb1[mm2];
}
else
{
if(strain>marke0[mm2] && marke1[mm2]>marke0[mm2])
{
abrit=marka0[mm2]+(marka1[mm2]-marka0[mm2])*(strain-marke0[mm2])/(marke1[mm2]-marke0[mm2]);
bbrit=markb0[mm2]+(markb1[mm2]-markb0[mm2])*(strain-marke0[mm2])/(marke1[mm2]-marke0[mm2]);
}
}
/* --- End calculation of brittle strength components; cohesion, friction, and hydrostatic pore pressure weakening factor --- */
/* --- Start ductile viscosity calculation -------------------------------------------*/
/* Inverted value of newtonian NU set */
nunewt=0;
/* Inverted value of power-low NU set */
nupowl=0;
/* Check for the presence of ductile rheology */
// For more viscosity options, see codes of version 1
if (marknu[mm2])
{
/* A) Simple Newtonian rheology */
// - used in laboratory model of van Dinther et al., JGR, 2013a -
/* Newtonian creep: SSii=NU0*2.0*EEii */
/* Effective viscosity: NU=NU0 */
/* Effective viscosity member in Stoks: NUs=NU */
if(markdh[mm2]==0 && markdv[mm2]==0 && (markss[mm2]==0 || markmm[mm2]==1.0))
{
/* Inverted value of newtonian NU calc */
nunewt=1.0/marknu[mm2];
}
/* --> D) P-T-stress dependent rheology without/with brittle/ductile transition <--*/
// - used in large-scale models PhD thesis van Dinther -
/* Reological equations */
/* Stress>SScr */
/* Power law dislocation creep: SSii={NU0*EEii*exp[(E+PV)/RT]}^(1/n) */
/* Effective viscosity: NU=1/2*{NU0*exp[(E+PV)/RT]}^(1/n)*EEii^[(1-n)/n] */
/* Effective viscosity member in Stoks: NUs=NU/n */
/* Stress<SScr */
/* Newtonian diffusion creep: SSii=NU1*EEii*exp[(E+PV)/RT] */
/* Effective viscosity: NU=NU0/2*exp[(E+PV)/RT] */
/* Effective viscosity member in Stoks: NUs=NU */
/* NU1=NU0/SScr^(n-1) */
if(marknu[mm2]>0 && (markdh[mm2]!=0 || markdv[mm2]!=0) && markss[mm2]!=0 && markmm[mm2]!=1.0)
{
// ---> 2. Calculate ductile viscosity <---
/* T-P exponent for effective NU calc */
e1=(markdh[mm2]+markdv[mm2]*mpb)/rt;
if(e1>150.0) e1=150.0;
e1=exp(e1);
/* Koef for stress independent creep NU1 calc */
k1=marknu[mm2]/pow(markss[mm2],markmm[mm2]-1.0);
/* Inverted value of newtonian NU calc for diffusion creep */
nunewt=1.0/(0.5*k1*e1);
mnu2=nunewt;
/* Effective viscosity1 calc */
siginnew1=siginnew=sigin;
nupowl=0;
// Calculate dislocation creep viscosity
if (siginnew>0) nupowl=1.0/(0.5*siginnew*marknu[mm2]*e1/pow(siginnew,markmm[mm2]));
mnu1=nupowl;
//Take arithmetic average of dislocation and diffusionc creep for effective ductile viscosity
mnu0=1.0/(mnu1+mnu2);
// ---> 3. Include elastic part for estimation future viscoelastic stresses <---
// Calculate visco-elasticity factor
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+mnu0);
// Calculate viscoelastic stress
siginnew2=2.0*mnu0*epsin*xelvis+sigin*(1.0-xelvis);
dsiginnew1=siginnew2-siginnew1;
/* Effective viscosity2 calc */
// See above for description. Repeated here
siginnew=siginnew2;
nupowl=0;
if (siginnew>0) nupowl=1.0/(0.5*siginnew*marknu[mm2]*e1/pow(siginnew,markmm[mm2]));
mnu1=nupowl;
mnu0=1.0/(mnu1+mnu2);
// Calculate visco-elasticity factor
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+mnu0);
// Calculate viscoelastic stress
siginnew=2.0*mnu0*epsin*xelvis+sigin*(1.0-xelvis);
dsiginnew2=siginnew-siginnew2;
/* ---> 4. Local iterations for dislocation viscosity calculation by Bisection method <--- */
ncount=0;
// Locally iterate over nupowl and siginnew until siginnew-siginnew0<10 Pa (or 100 iterations)
do
{
// Check to prevent num issue when stress is not changing: only not true and in if sigma_2_1 = sigma_1_1 = sigma_0 : almost never no stress change?
dsiginnew0=ABSV(dsiginnew1)+ABSV(dsiginnew2);
if(dsiginnew0>0)
{
// Weigth factor: 0.5 = midpoint
dsiginnew0=0.5;
// Calculate midpoint = new estimate stress
siginnew0=siginnew=siginnew1*(1.0-dsiginnew0)+siginnew2*dsiginnew0;
// Update viscosity with that new stress
nupowl=0;
if (siginnew>0) nupowl=1.0/(0.5*siginnew*marknu[mm2]*e1/pow(siginnew,markmm[mm2]));
mnu1=nupowl;
mnu0=1.0/(mnu1+mnu2);
// Update stress estimate with new viscosity
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+mnu0);
siginnew=2.0*mnu0*epsin*xelvis+sigin*(1.0-xelvis);
// Calculate difference new and last stress estimate -> converging?
dsiginnew0=siginnew-siginnew0;
// Use this newest estimate for stress change to see if in same direction
// If yes; keep going in that direction; leave oldest estimate behind
if((dsiginnew0>=0 && dsiginnew1>=0) || (dsiginnew0<0 && dsiginnew1<0))
{
siginnew1=siginnew0;
dsiginnew1=dsiginnew0;
}
// If in opposite direction; passed optimal stress so turn back; leave newest 1 estimate behind
else
{
siginnew2=siginnew0;
dsiginnew2=dsiginnew0;
}
}
ncount++;
}
while(ABSV(dsiginnew0)>10.0 && ncount<101);
}
}
/* --- End Ductile viscosity calculation -------------------------------------------*/
// Check ductile effective viscosity calculation
nueff=1.0/(nunewt+nupowl);
/* Mantle viscosity */
#if setup > 9
if((Markt==9 || Markt==10) && timesum<3.15576e+7*1e+4 && nueff<1e+20) nueff=1e+20;
#endif
if(nueff<nubeg) nueff=nubeg; if(nueff>nuend) nueff=nuend;
if(nueff<markn0[mm2]) nueff=markn0[mm2]; if(nueff>markn1[mm2]) nueff=markn1[mm2];
nuduct=nueff;
*mdi0=0;
/* ------------------ Calculate viscoplastic viscosity ---------------------------- */
// Calculate brittle strength - sbrit -
// Plasticity switched off when both terms in the yield strength formulation are 0
if(((1-markll[mm2])*markb0[mm2] || abrit) && epsin)
{
// --- Strong slip velocity dependency of friction coefficient ---
// After Burridge and Knopoff (1967), Ampuero and Ben-Zion (2008), etc.
// Adapt friction parameters based on x-location (lab) or temperature (large-scale)
#if setup==10
if (mm2==7 && cmx>(700e3-shift_km) && cmx<(1150e3-shift_km) && cmy<100e3 && veldepfric==1)
#endif
#if setup==11 // Including off-megathrust rate weakening
if (cmx>(700e3-shift_km) && cmx<(1150e3-shift_km) && cmy<100e3 && veldepfric==1)
#endif
#if setup==12 // Collisional setup - L. Dal Zilio
if (cmx>(1700e3-shift_km) && cmx<(2150e3-shift_km) && cmy<100e3 && veldepfric==1)
#endif
#if setup < 10
if (mm2==5 && veldepfric==1)
#endif
{
// Calculate Relative amount of Velocity-Weakening vs Velocity-Strengthening
#if setup>9
// Velocity-strengthening region
if (Markk<=tk_updipsez0) // && mm2==7)
{ relvw = 0; }
// Transitions to seismogenic zone: updip
else if (Markk>tk_updipsez0 && Markk<tk_updipsez1) // && mm2==7)
{ relvw = (Markk-tk_updipsez0)/(tk_updipsez1-tk_updipsez0); }
// Velocity-weakening for Seismogenic Zone (and off-megathrust region)
else
{ relvw = 1;}
// Note for the off-events setup there is no strengthening outside the subduction channel of basaltic crust
#elif setup < 10
// Change mm2 locally in this viscalc-routine, so that also for viscosity, shear modulus, Pf/Ps etc use this
// Seismogenic Zone = velocity-weakening
if (cmx >= end_updip && cmx <= start_downdip)
{ relvw = 1; mm2 = 6; }
// Transitions to seismogenic zone: updip
else if (cmx >= start_updip && cmx <= end_updip)
{ relvw = (cmx-start_updip)/(2*half_range);}
// Transitions away from seismogenic zone: downdip
else if (cmx >= start_downdip && cmx <= end_downdip)
{ relvw = 1 - ( cmx-start_downdip )/( 2*half_range );}
// Velocity-strenghtening region
else
{ relvw = 0; }
#endif
// Calculate slip-rate dependent change of coefficients
mvslip[mm1] = 2.0*epsin*res_high;
dvw = (1-mgamma_vw)+mgamma_vw/(1.0+mvslip[mm1]/mvc_vw);
dvs = (1-mgamma_vs)+mgamma_vs/(1.0+mvslip[mm1]/mvc_vs);
// Change friction coefficient accordingly
bbrit = mus_vs*dvs + relvw*(markb0[mm2]*dvw-mus_vs*dvs);
// Change cohesion as a function of slip velocity, if desired (if marka0[5]=~marka0[6])
#if setup>9
abrit = marka0[mm2];
#elif setup < 10
abrit = marka0[5] + relvw*(marka0[6]-marka0[5]);
#endif
// Iterate locally to obtain stable estimate of slip-rate
sbrit=abrit+bbrit*(1-lamb)*Markp;
if(sbrit>0 && Markv>0)
{
for(ncount=0;ncount<5;ncount++)
{
if(sbrit>0)
{
// epsin = 0.5* sbrit/eta in viscous formulation
mvslip[mm1] = sbrit/Markv*res_high;
dvw = (1-mgamma_vw)+mgamma_vw/(1.0+mvslip[mm1]/mvc_vw);
dvs = (1-mgamma_vs)+mgamma_vs/(1.0+mvslip[mm1]/mvc_vs);
bbrit = mus_vs*dvs + relvw*(markb0[mm2]*dvw-mus_vs*dvs);
sbrit=abrit+bbrit*(1-lamb)*Markp;
}
else
{
fprintf(fp_log,"LOOK: Sbrit is <= 0 within v-w loop: %e, abrit = %e, bbrit = %e, pr = %e, markvis = %e, x = %e, y = %e \n",sbrit,abrit,bbrit,Markp,Markv,cmx,cmy); fflush(fp_log);
}
}
}
// Calculate average value stresses and strainrates seismogenic zone
// But here do not have proper stress and vel(e) yet ! Only yield strength ..
#if setup < 10
if (relvw==1 && cmy<=gy[n_glayer+1])
{
sbrit_ave = sbrit_ave + (abrit + bbrit*(1-lamb)*Markp);
count_sezm = count_sezm + 1;
}
#endif
}
// In case of no rate dependency also calculate yield strength
else
{
sbrit=abrit+bbrit*(1-lamb)*Markp;
}
// Check strength values
if(sbrit<0) sbrit=0;
if(sbrit>marks1[mm2]) sbrit=marks1[mm2];
// Save frictional properties to file for analyses
// Save time and space by only doing at last timestep in prn output cycle
#if setup > 9
if (n0==1 && start_cond==1 && relvw<=1.0)
{ fprintf(flfric," %d %e %e %e %e %e %e %e %e \n", mm2, cmx, cmy, Markp, sbrit, Markxx[mm1], Markxy[mm1],lamb,bbrit); }
#endif
// Store yield stress for post-processing and interpolation to nodes
msbrit[mm1] = sbrit;
// Store old-stress
msii_old[mm1] = sigin;
/* ---> 5. ! Viscoelastic case ! <--- */
if(stoksmod && timestepe && epsin)
{
/* Future plastic creep */
/* Future stresses calc */
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+nueff);
siginnew=2.0*nueff*epsin*xelvis+sigin*(1.0-xelvis);
// Plastic yielding if new estimate or stress of previous timestep exceeds strength
if(sbrit<siginnew || sbrit<sigin)
{
/* Executing plasticity by reseting stresses and viscosities */
// Note yn is defined at call to this viscalc routine
if(yn==1)
{
/* XXX Density correction for the dilation angle XXX */
// We do not use dilation ! Not for any rock type
if(markf0[mm2]>0 && markf1[mm2]>0)
{
/* Second invariant of viscoplastic strain calc, check */
e1=Marke[mm1]-sbrit/2.0/markgg[mm2];
/* Correction of divergence rate for plastic strain rate */
if(e1<markf1[mm2])
{
e1=epsin-sbrit/2.0/nuduct;
if(e1) *mdi0=2.0*e1*markf0[mm2]*timestepe;
}
}
/* ! Recompute stress ! So stress no longer exceed strength */
if(sigin && sbrit<sigin)
{
Markxx[mm1] *= sbrit/sigin;
Markxy[mm1] *= sbrit/sigin;
sigin=sbrit;
}
/* ! Recompute viscosity ! So decrease viscosity accordingly to localize deformation */
nubrit=sbrit/(2.0*epsin+(sigin-sbrit)/timestepe/markgg[mm2]);
if(nubrit<nueff) nueff=nubrit;
/* Set initial plastic strain */
if(Marke[mm1]<=0) Marke[mm1]=1e-20;
}
}
else
{
if(yn==1) Marke[mm1]=0;
}
}
}
/* ------------------ End calculation viscoplastic viscosity ---------------------------- */
/* Check calculated viscosity to be within hard code minimum and maximum */
if(nueff<nubeg) nueff=nubeg; if(nueff>nuend) nueff=nuend;
if(nueff<markn0[mm2]) nueff=markn0[mm2]; if(nueff>markn1[mm2]) nueff=markn1[mm2];
// Pass final viscosity back to main model
*mnu = nueff;
}
/* End OMP: Nu calc after reological equation */
/* Number of nearest left vertical line find */
long int m1serch(double cmx)
/* cmx - X coordinate */
{
/* Binary-search bounds over the vertical grid lines gx[0..xnumx-1] */
long int lo=0,hi=xnumx-1,mid;
/* Narrow [lo,hi] until the two lines bracket cmx */
while((hi-lo)>1)
{
mid=(lo+hi)/2;
if (gx[mid]>cmx) hi=mid; else lo=mid;
}
/* Clamp so that a valid cell [lo,lo+1] always exists */
if(lo>xnumx-2) lo=xnumx-2;
return lo;
}
/* Number of nearest left vertical line find */
/* Number of nearest upper horizontal line find */
long int m2serch(double cmy)
/* cmy - Y coordinate */
{
/* Binary-search bounds over the horizontal grid lines gy[0..ynumy-1] */
long int lo=0,hi=ynumy-1,mid;
/* Narrow [lo,hi] until the two lines bracket cmy */
while((hi-lo)>1)
{
mid=(lo+hi)/2;
if (gy[mid]>cmy) hi=mid; else lo=mid;
}
/* Clamp so that a valid cell [lo,lo+1] always exists */
if(lo>ynumy-2) lo=ynumy-2;
return lo;
}
/* Number of nearest upper horizontal line find */
/* Erosion/Sedimentation Function for markers */
/* mardy - marker vertical size, m */
void erosmarkomp(long int mm1, int yn, long int m10, double x, double y, char markt[], double marke[], double markd[])
/* mm1 - marker number */
/* yn - current sedimnts type 2,3 */
/* m1 - Up Left Node X,Y Num */
{
/* Variables */
double e,e0;
/* Surface level elevation definition */
/* Relativ Normalized coord Calc */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
/* Surface level elevation for marker definition */
e0=(e*ep[m10+1]+(1.0-e)*ep[m10]);
/* Marker surface elevation definition */
if(markt[mm1]<2)
{
/* Water/Air -> Sediments conversion */
if(y>e0) {markt[mm1]=yn; marke[mm1]=0; markd[mm1]=-1.0;}
}
if(markt[mm1]>1)
{
/* Rock->Water/Air conversion */
if(y<e0) {markt[mm1]=0; marke[mm1]=0; markd[mm1]=-1.0;}
}
}
/* OMP End Erosion/Sedimentation Function for markers */
/* OMP Rock to rock+melt transformation */
void meltingomp(double mtk, double mpb, long int mm1, int mm2, char Markt[], double Marke[], double *mxmelt, double *mhlatent)
/* mtk - T, K */
/* mpb - P, bar */
/* mm1 - mark number */
/* mm2 - mark type */
/* Markt[], Marke[] - marker type / plastic strain arrays, updated in place */
/* mxmelt, mhlatent - outputs of meltpart1omp: melt fraction and latent heat */
{
/* Melting related change of the marker type: a solid type (<20) switches to
   its molten counterpart (+20) once a melt fraction appears, and switches
   back when the melt fraction vanishes */
/* Check marker type */
/* NOTE(review): the if below has no braces, so it guards ONLY the mpb clamp on
   the next line; the switch runs for every type regardless (non-meltable types
   just hit the default case). Presumably intentional - confirm. */
if (mm2==3 || mm2==4 || mm2==5 || mm2==6 || mm2==7 || mm2==8 || mm2==11 || mm2==16 || mm2==23 || mm2==24 || mm2==25 || mm2==26 || mm2==27 || mm2==28 || mm2==34 || mm2==36 || mm2==37 || mm2==38)
if (mpb<0) mpb=0;
switch(mm2)
{
/* Sediments, upper crust */
case 3:
case 4:
case 5:
case 17:
case 23:
case 24:
case 25:
case 26:
case 37:
/* Basalt, Gabbro */
case 7:
case 8:
case 16:
case 6:
case 18:
case 27:
case 28:
case 36:
case 38:
// mxmelt and mhlatent are already pointers to mem address, so you can enter them without &
meltpart1omp(mtk,mpb,mm2,mxmelt,mhlatent);
/* Melting: solid type (<20) becomes molten type (+20); plastic strain reset */
if(*mxmelt>0 && mm2<20) {Markt[mm1]+=20; Marke[mm1]=0;}
/* Freezing: molten type (>20) reverts to its solid type (-20) */
if(*mxmelt<=0 && mm2>20) {Markt[mm1]-=20; Marke[mm1]=0;}
return;
/* Hydrated Peridotite: 11 (solid) <-> 34 (molten); freezing yields type 14 */
case 11:
case 34:
meltpart1omp(mtk,mpb,mm2,mxmelt,mhlatent);
if(*mxmelt>0 && mm2==11) {Markt[mm1]=34; Marke[mm1]=0;}
if(*mxmelt<=0 && mm2==34) {Markt[mm1]=14; Marke[mm1]=0;}
return;
/* Others */
default: return;
}
}
/* OMP End Rock to rock+melt transformation */
/* Melt fraction, density, viscosity, heat capacity calculation */
/* For one (partially) molten marker: computes melt fraction and the melt-weighted
   density (incl. mantle/crust phase transitions), thermal expansion/compressibility,
   effective viscosity, heat capacity, conductivity and shear modulus */
void meltpartomp(double mtk, double mpb, double x, double y, long int mm1, int mm2, double *mro,double *mbb, double *maa, double *mnu, double *mcp, double *mkt, double *mgg, double *mxmelt, double *mhlatent)
/* mtk - T, K */
/* mpb - P, bar */
/* x,y - XY location of point for Vx,Vy calc */
/* mm1 - mark number */
/* mm2 - mark type (molten types 23..38; solid equivalents are mm2-20) */
{
/* Val buffer */
/* NOTE(review): m1 and cpadd are declared but never used in this routine */
double xmelt=0,ival,dmpb,dmtk,sduct,nueff,smin,smax,nmin,nmax,cpadd=0,vx0,vy0,pr0,sp0,ee0;
long int m1,m10,m20;
double Mnu,mdi0;
double p_pl_out,p_ga_in,rokf,p_sp_in,p_ol_out,p_pv_in,p_sp_out,p_st_in;
m10=m1serch(x);
/* Check marker type: only molten types are processed; all outputs zeroed otherwise */
if (mm2==23 || mm2==24 || mm2==25 || mm2==26 || mm2==27 || mm2==28 || mm2==34 || mm2==36 || mm2==37 || mm2==38)
{
/* Calculate melt fraction */
// mxmelt and mhlatent are already pointers to mem address, so you can enter them without &
meltpart1omp(mtk,mpb,mm2,mxmelt,mhlatent);
xmelt = *mxmelt;
/* Standard adiabatic term: al=bro/(1+bro*(Tk-298.15)) */
/* Properties are melt-fraction weighted between molten (mm2) and solid (mm2-20) types */
*mbb=(markbb[mm2]*xmelt+markbb[mm2-20]*(1.0-xmelt))/(1.0-(markbb[mm2]*xmelt+markbb[mm2-20]*(1.0-xmelt))*(mtk-298.15));
*maa=(markaa[mm2]*xmelt+markaa[mm2-20]*(1.0-xmelt))/(1.0+(markaa[mm2]*xmelt+markaa[mm2-20]*(1.0-xmelt))*(mpb-1.0)*1e-3);
/* Density */
/* Ro=ro0 */
if (densimod==0)
{
*mro=markro[mm2]*xmelt+markro[mm2-20]*(1.0-xmelt);
}
/* Ro=ro0*(1-bro*(TK-298.15))*(1+aro*(Pkbar-0.001)) */
else
{
/* ival accumulates multiplicative density corrections from phase transitions */
ival=1.0;
/* ========================= */
/* Mantle phase transitions */
/* ========================= */
/*
if(mm2>=29 && mm2<=34 && markex[mm1]>0) ival=1.0-0.04*markex[mm1];
*/
/* Eclogitization, St, Pv transitions in oceanic crust */
if(mm2>=27 && mm2<=28)
{
/* Eclogitization Ito and Kennedy, 1971 */
/*basalt=>garnet granulite (Ga-In) transition*/
p_ga_in=-9222.0+mtk*14.0;
/*Not to have granulites at pressure lower than 2 kbar*/
if(p_ga_in<2000.0) p_ga_in=2000.0;
/*garnet granulite=>eclogite (Pl-Out) transition*/
p_pl_out=-1460.0+mtk*20.0;
/*Not to have eclogites at pressure lower than 12 kbar*/
if(p_pl_out<12000.0) p_pl_out=12000.0;
if(mpb>p_ga_in)
{
/* Density increase up to 16%, ramped in over the T range [teclmin,teclmax] */
rokf=0;
if(mtk>teclmin)
{
if(mtk>teclmax)
{
rokf=0.16;
}
else
{
rokf=0.16*(mtk-teclmin)/(teclmax-teclmin);
}
}
/* Full correction beyond Pl-out, linear ramp between Ga-in and Pl-out */
if(mpb>=p_pl_out)
{
ival=1.0+rokf;
}
else
{
ival=(1.0+rokf*(mpb-p_ga_in)/(p_pl_out-p_ga_in));
}
}
/* Coe->St transition Gerya et al., 2004, PCM */
p_st_in=59100.0+mtk*22.6;
if(mpb>p_st_in) ival*=1.06;
/* Pv transition, Mishin et al., 2008 with slope from Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=354000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=352000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.08;
if(mpb>=p_sp_out)
{
ival*=1.0+rokf;
}
else
{
ival*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
/* Ol-Sp and Pv transitions in the mantle */
if(mm2>=29 && mm2<=34)
{
/* Ol-Sp transition, Katsura & Ito, 1989 */
/* Ol-out transition*/
p_ol_out=91000.0+mtk*27.0;
/* Sp-in transition*/
p_sp_in=66000.0+mtk*39.0;
/*Limit width of Sp-Ol transition to 2 kbar */
if(p_sp_in>p_ol_out-2000.0) p_sp_in=p_ol_out-2000.0;
if(mpb>p_sp_in)
{
rokf=0.06;
if(mpb>=p_ol_out)
{
ival*=1.0+rokf;
}
else
{
ival*=(1.0+rokf*(mpb-p_sp_in)/(p_ol_out-p_sp_in));
}
}
/* Pv transition, Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=304000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=302000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.11;
if(mpb>=p_sp_out)
{
ival*=1.0+rokf;
}
else
{
ival*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
/* Density calculation with corrections */
/* Phase-transition factor ival is applied to the solid fraction only */
*mro=xmelt*markro[mm2]*(1.0-markbb[mm2]*(mtk-298.15))*(1.0+markaa[mm2]*(mpb-1.0)*1e-3)+(1.0-xmelt)*ival*markro[mm2-20]*(1.0-markbb[mm2-20]*(mtk-298.15))*(1.0+markaa[mm2-20]*(mpb-1.0)*1e-3);
}
/**/
/* Viscosity */
/* Effective NU calc check */
/* Little melt */
// Assume similar to no melt, since go into viscalc..
if(xmelt<0.1)
{
// QUESTION TARAS - why plastic reset here? (i switched yn=1 to yes wrt old version, but before was set to 0 here)
// while mm2 going in is mm2-20 ? And mm2>20 returns immediately; ok that put here mm2-20 ?
/* Below 10% melt the solid rheology (type mm2-20) is used unchanged */
viscalcomp(mtk,mpb,markx[mm1],marky[mm1],markv[mm1],markwa[mm1],markk[mm1],markp[mm1],markt[mm1],markexx[mm1],markexy[mm1],markxx,markxy,marke,mm1,mm2-20,1,m10,&Mnu,&mdi0);
*mnu=Mnu;
*mgg=markgg[mm2-20];
}
/* Significant melt */
// Allowed to drop viscosity below minimum for rock type (init.t3c), but not below minimum for whole model (mode.t3c)
else
{
/* Set viscosity and stress limits */
nmin=MAXV(markn0[mm2],nubeg);
nmax=MINV(markn1[mm2],nuend);
smin=MAXV(marks0[mm2],strmin);
smax=MINV(marks1[mm2],strmax);
/* Calc effective strain rate after second strain rate tensor invariant EEii=(1/2SUM(EPSik^2))^(1/2) */
m20=m2serch(y);
allinteriomp(x,y,m10,m20,&vx0,&vy0,&pr0,&sp0,&ee0);
// ee0=pow(eps[6]*eps[6]+eps[4]*eps[4],0.5); (was epsin)
/* Effective NU calc check */
/* Melt-fraction dependent viscosity (exponential weakening with xmelt) */
nueff=marknu[mm2]*exp(2.5+pow((1.0-xmelt)/xmelt,0.48)*(1.0-xmelt));
if(nueff<nmin) nueff=nmin;
if(nueff>nmax) nueff=nmax;
/* Ductile stress calc check */
/* Re-limit viscosity so the implied ductile stress stays within [smin,smax] */
sduct=nueff*2.0*ee0;
if(sduct<smin && ee0) {nueff=0.5*smin/ee0; sduct=smin;}
if(sduct>smax) {nueff=0.5*smax/ee0; sduct=smax;}
*mnu=nueff;
/* Shear modulus */
*mgg=markgg[mm2];
}
/* Heat capacity */
*mcp=markcp[mm2]*xmelt+markcp[mm2-20]*(1.0-xmelt);
/* heat conductivity */
*mkt=((markkt[mm2]+markkf[mm2]/(mtk+77.0))*exp(markkp[mm2]*mpb))*xmelt+((markkt[mm2-20]+markkf[mm2-20]/(mtk+77.0))*exp(markkp[mm2-20]*mpb))*(1.0-xmelt);
/* Additional melting adiabatic term, heat capacity */
/* Latent-heat contributions only matter while melting is in progress (0<xmelt<1) */
if(xmelt>0 && xmelt<1.0)
{
/* Melting adiabatic term: alm=-ro*(dHlat/dP)/T */
/* Numerical differentiation */
/* Central difference of latent heat w.r.t. pressure (+-0.1% of mpb) */
dmpb=mpb*0.001;
meltpart1omp(mtk,mpb-dmpb,mm2,mxmelt,mhlatent);
ival= *mhlatent;
meltpart1omp(mtk,mpb+dmpb,mm2,mxmelt,mhlatent);
ival-= *mhlatent;
ival *= *mro / (mtk*2.0*dmpb*1e+5);
*mbb+=ival;
/* Melting heat capacity term: cpm=dHlat/dT */
/* Numerical differentiation */
/* Central difference of latent heat w.r.t. temperature (+-1 K) */
dmtk=1.0;
meltpart1omp(mtk+dmtk,mpb,mm2,mxmelt,mhlatent);
ival= *mhlatent;
meltpart1omp(mtk-dmtk,mpb,mm2,mxmelt,mhlatent);
ival-= *mhlatent;
ival/=2.0*dmtk;
*mcp+=ival;
}
}
else
{
/* Non-molten marker type: zero all outputs */
*maa= *mbb= *mxmelt= *mhlatent= *mro= *mnu= *mcp= *mkt= 0;
}
}
/* End OMP Rock to rock+melt transformation */
/* Melt fraction, latent heat calculation */
/* Melt fraction and latent heat for one marker type at given P-T.
   Uses a linear melting model between the wet solidus ts and the dry
   liquidus tl; pressure (bar) is converted to an equivalent depth
   ykm via a 3e-3 km/bar lithostatic gradient. */
void meltpart1omp(double mtk, double mpb, int mm2, double *mxmelt, double *mhlatent)
/* mtk - T, K */
/* mpb - P, bar */
/* mm2 - mark type */
/* mxmelt - output: melt fraction, clamped to [0,1] */
/* mhlatent - output: latent heat, J/kg, scaled by the melt fraction */
{
/* Val buffer (unused locals ival, m1 of the previous version removed) */
double xmelt=0,hlatent=0;
/* ts/tl stay 0 (=> no melting) unless a case below sets them */
double ykm=mpb*3e-3,ts=0,tl=0;
/* Calculate solidus/liquidus using marker type; negative depths never melt */
if (ykm>0)
switch(mm2)
{
/* Sediments: latent heat 300 kJ/kg (Bittner & Schmeling, 1995) */
case 3:
case 4:
case 5:
case 17:
case 23:
case 24:
case 25:
case 37:
/* Wet Solidus Temperature, Johannes, 1985, Poli & Schmidt, 2002 */
if (ykm<36.0)
{
ts=889.0+536.6/(ykm+1.609)+18.21/(ykm+1.609)/(ykm+1.609);
}
else
{
ts=831.3+2.0*ykm;
}
/* Dry Granite Liquidus, Johannes, 1985 */
tl=1262.0+3.0*ykm;
hlatent=300000.0;
break;
/* Basalt, Gabbro: latent heat 380 kJ/kg (Bittner & Schmeling, 1995) */
case 7:
case 8:
case 16:
case 27:
case 28:
case 36:
case 6:
case 18:
case 26:
case 38:
/* Wet solidus, Schmidt & Poli, 1998 */
if (ykm<48.0)
{
ts=972.6-2111.0/(ykm+10.63)+70033.0/(ykm+10.63)/(ykm+10.63);
}
else
{
ts=935.4+0.1162*ykm+0.006937*ykm*ykm;
}
/* Dry Toleitic Basalt Liquidus, Hess, 1989 */
tl=1423.15+3.5*ykm;
hlatent=380000.0;
break;
/* Peridotite: latent heat 400 kJ/kg Turcotte & Schubert, 1982, p.171 */
case 11:
case 34:
/* Wet solidus, Schmidt & Poli, 1998 */
if (ykm<72.0)
{
ts=1239.8+1493.0/(ykm+9.701);
}
else
{
ts=1266.3-0.3948*ykm+0.003893*ykm*ykm;
}
/* Dry Peridotite Liquidus, Hess, 1989 */
tl=2073.15+3.8*ykm;
hlatent=400000.0;
break;
/* Other rocks - No melting */
default:
break;
}
/* Melt fraction, latent heat calculation */
*mxmelt = *mhlatent = 0;
if(tl)
{
/* Linear melt fraction between solidus and liquidus, clamped to [0,1] */
xmelt=(mtk-ts)/(tl-ts);
if(xmelt<0) xmelt=0;
if(xmelt>1.0) xmelt=1.0;
*mxmelt = xmelt;
/* Latent heat scales with the melt fraction */
hlatent *= xmelt;
*mhlatent=hlatent;
}
}
/* End OMP Melt fraction, latent heat calculation */
/* Hydration front progress after H2O budget */
/* Hydration front progress after H2O budget.
   Four passes over the markers: (1) release/consume water and spawn fluid
   markers, (2) hydrate rocks inside the hydration range, (3) accumulate free
   water carried by fluid markers per cell, (4) balance consumption against the
   free-water budget and erase exhausted fluid markers. Finally, slots of
   erased markers are refilled from the additional markers appended at the end.
   Always returns 0. */
double hydration2omp()
{
/* Val buffer */
/* NOTE(review): several of these locals (vfiltr, yfiltr, dydx, sy2..sy5, W0..R3,
   hytimesum, n1, n2, ...) and the assignments to mwamin/ysurf below are never
   used in this routine - presumably remnants of an older version; confirm */
double ysurf,vfiltr,yfiltr,dydx,dydx1,sy1,sy2,sy3,sy4,sy5,e1,mwamin,x0,y0,x1,y1,vx1,vy1;
double hytimesum,hytimesum0;
/* TD Database variables */
double W0,W1,W2,W3,R0,R1,R2,R3,n,e,dx,dy;
double mtk,mpb,mwa,mro,dmwa,wro;
double Mgg,Mro,Mwa,Mcp,Mbb,Maa,Mdhh,Mkt;
long int m1,m2,m3,mm1,marknum1=marknum;
int mm2,mm3,n1,n2;
fprintf(fp_log,"\n WATER Transport BEGIN \n");fflush(fp_log);
/* Marker steps */
dx=dxwater;
dy=dywater;
/* Min water contents in the hydraten mantle wt% */
mwamin=0.1;
/* Min Distance from erosion surface for water release */
ysurf=8000.0;
/* Clear per-node water budgets and hydration/melting extent boxes */
for (m1=0;m1<nodenum;m1++)
{
wa0[m1]=0;
wa1[m1]=0;
sol0[m1]=0;
sol1[m1]=0;
sol0[nodenum+m1]=1e+30;
sol1[nodenum+m1]=-1e+30;
sol0[nodenum2+m1]=1e+30;
sol1[nodenum2+m1]=-1e+30;
fre0[ m1]=1e+30;
fre0[nodenum +m1]=-1e+30;
fre0[nodenum2+m1]=1e+30;
fre0[nodenum3+m1]=-1e+30;
}
/* Fluid marker generation cycle */
double start=omp_get_wtime();
for (mm1=0;mm1<marknum;mm1++)
{
// Reset fluid presence indicator for next marker for loop
markwa[mm1] = 0;
/* Marker type */
mm2=(int)markt[mm1]; if (mm2>=100) mm2-=100;
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Erosion surface */
e1=(markx[mm1]-gx[m1])/(gx[m1+1]-gx[m1]);
sy1=(e1*ep[m1+1]+(1.0-e1)*ep[m1]);
/* Check markers out of grid and within hydration range */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && (markk[mm1]>0 || markt[mm1]>=50) && markt[mm1]<100)
if((markd[mm1])>=0 && (markw[mm1])>=0 && mm2>1 && mm2!=9 && mm2!=10)
{
if(mm2<50)
{
/* P, T parameters calc */
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m1,m2);
mtk=(markk[mm1]);
/* Mantle to Antigorite transformation */
antigoromp(mtk,mpb,markx[mm1],marky[mm1],mm1,m1,markt);
/* Rocks to rock+melt transformation */
if (markt[mm1]>=20)
{
/* Check melting extent */
if(fre0[ +m3]>markx[mm1]-dx) fre0[ m3]=markx[mm1]-dx;
if(fre0[nodenum +m3]<markx[mm1]+dx) fre0[nodenum +m3]=markx[mm1]+dx;
if(fre0[nodenum2+m3]>marky[mm1]-dy) fre0[nodenum2+m3]=marky[mm1]-dy;
if(fre0[nodenum3+m3]<marky[mm1]+dy) fre0[nodenum3+m3]=marky[mm1]+dy;
}
/* Compute TD variables */
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m1,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
mro=Mro;
mwa=Mwa;
/* Water changes in kg/m3 calc */
dmwa=mro*(mwa-markw[mm1])*1e-2;
//{fprintf(fp_log,"H2O MARKER %ld %d %d %e %e %e %e %e %e %e",mm1,mm2,mm3,mtk-273.15,mpb/1000.0,mwa,mro,markw[mm1],markd[mm1],dmwa);getchar();}
//{fprintf(fp_log,"H2O RELEASE %ld %d %d %e %e %e %e %e %e %e",mm1,mm2,mm3,mtk-273.15,mpb/1000.0,mwa,mro,markw[mm1],markd[mm1],dmwa);getchar();}
/* Add water changes to the current cell, kg/m3 */
/* Water release */
if ((markw[mm1]-mwa)>dmwamin)
{
/* Save new water content */
markw[mm1]=mwa;
/* Generation of fluid marker (NO FLUID From melts */
if (markt[mm1]<20 && marky[mm1]>sy1)
{
markt[marknum1]=markt[mm1]+50;
markx[marknum1]=markx[mm1];
marky[marknum1]=marky[mm1];
markk[marknum1]=markk[mm1];
markd[marknum1]=1050.0;
markw[marknum1]=-dmwa;
// If new marker is interesting for picking algorithm, flag to follow
// Note is hard-coded in i2.c as well. Only here excluded fluid markers, since immobile can not become fluid
/* BUGFIX: this check must run BEFORE marknum1 is incremented, so it
   inspects the marker just created (the old code read the next,
   still-uninitialized slot) */
if ( start_cond==1 && marky[marknum1]<85e3 && markx[marknum1]>gx[m10_hr] && markx[marknum1]<gx[m11_hr] && markt[marknum1]>49 && markt[marknum1]<100)
{
follow[marknum1]=2;
nmf++;
}
/* Add additional markers counter */
marknum1++;
/* Check hydration extent */
if(sol0[nodenum+m3]>markx[mm1]-dx) sol0[nodenum+m3]=markx[mm1]-dx;
if(sol1[nodenum+m3]<markx[mm1]+dx) sol1[nodenum+m3]=markx[mm1]+dx;
if(sol0[nodenum2+m3]>marky[mm1]-dy) sol0[nodenum2+m3]=marky[mm1]-dy;
if(sol1[nodenum2+m3]<marky[mm1]+dy) sol1[nodenum2+m3]=marky[mm1]+dy;
}
}
else
/* Water consuming */
{
if(dmwa>0)
{
wa1[m3]+=dmwa;
sol1[m3]+=1.0;
}
}
}
else
/* Fluid marker count */
{
/* Check position */
if(marky[mm1]>sy1)
{
/* Check hydration extent */
if(sol0[nodenum+m3]>markx[mm1]-dx) sol0[nodenum+m3]=markx[mm1]-dx;
if(sol1[nodenum+m3]<markx[mm1]+dx) sol1[nodenum+m3]=markx[mm1]+dx;
if(sol0[nodenum2+m3]>marky[mm1]-dy) sol0[nodenum2+m3]=marky[mm1]-dy;
if(sol1[nodenum2+m3]<marky[mm1]+dy) sol1[nodenum2+m3]=marky[mm1]+dy;
}
else
/* Erase fluid marker */
{
markx[mm1]=-1.0;
markk[mm1]=0;
}
}
}
}
/* Rock hydration cycle: rocks get hydrated by changing marker type mm2 */
start=omp_get_wtime();
for (mm1=0;mm1<marknum;mm1++)
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && markt[mm1]<50)
{
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Check markers within hydration range */
if(markx[mm1]>sol0[nodenum+m3] && marky[mm1]>sol0[nodenum2+m3] && (markx[mm1])<sol1[nodenum+m3] && (marky[mm1])<sol1[nodenum2+m3])
{
/* Fluid presence mark */
markwa[mm1]=1;
if(markt[mm1]==9 || markt[mm1]==10 || markt[mm1]==12 || markt[mm1]==14 || markt[mm1]==5 || markt[mm1]==6)
{
/* Mantle Hydration */
if (markt[mm1]!=5 && markt[mm1]!=6)
{
mm2=markt[mm1]=11;
}
else
{
mm2=markt[mm1]=markt[mm1]+12;
}
/* P, T parameters calc */
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m1,m2);
mtk=(markk[mm1]);
/* Mantle to Antigorite transformation */
antigoromp(mtk,mpb,markx[mm1],marky[mm1],mm1,m1,markt);
/* Rocks to rock+melt transformation */
if (markt[mm1]>=20)
{
/* Check melting extent */
if(fre0[ +m3]>markx[mm1]-dx) fre0[ m3]=markx[mm1]-dx;
if(fre0[nodenum +m3]<markx[mm1]+dx) fre0[nodenum +m3]=markx[mm1]+dx;
if(fre0[nodenum2+m3]>marky[mm1]-dy) fre0[nodenum2+m3]=marky[mm1]-dy;
if(fre0[nodenum3+m3]<marky[mm1]+dy) fre0[nodenum3+m3]=marky[mm1]+dy;
}
/* Thermodynamic database use for Ro as function of Water content */
/* Compute TD variables */
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m1,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
mro=Mro;
mwa=Mwa;
/* Water changes in kg/m3 calc */
dmwa=mro*(mwa-markw[mm1])*1e-2;
/* Add water changes to the current cell, kg/m3 */
/* Water consuming */
if (dmwa>0)
{
wa1[m3]+=dmwa;
sol1[m3]+=1.0;
}
}
}
}
/* Fluid marker computing cycle */
start=omp_get_wtime();
for (mm1=0;mm1<marknum1;mm1++)
{
/* Check markers out of grid and within hydration range */
if(markt[mm1]>=50 && markt[mm1]<100 && markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize)
{
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Erosion surface */
e1=(markx[mm1]-gx[m1])/(gx[m1+1]-gx[m1]);
sy1=(e1*ep[m1+1]+(1.0-e1)*ep[m1]);
/* Water in melt region conversion */
if(markd[mm1]<1100.0 && markx[mm1]>fre0[m3] && marky[mm1]>fre0[nodenum2+m3] && markx[mm1]<fre0[nodenum+m3] && marky[mm1]<fre0[nodenum3+m3]) markd[mm1]=1150.0;
/* Check position, no fluid above erosion/sedimentation level, no fluid passing through the melt */
if(marky[mm1]>sy1 && marky[mm1]<zdeep && (markd[mm1]<1100.0 || (markx[mm1]>fre0[m3] && marky[mm1]>fre0[nodenum2+m3] && markx[mm1]<fre0[nodenum+m3] && marky[mm1]<fre0[nodenum3+m3])))
{
wa0[m3]+=markw[mm1];
sol0[m3]+=1.0;
}
else
/* Erase fluid marker */
{
markx[mm1]=-1.0;
markk[mm1]=0;
}
}
}
if (printmod==10000) fprintf(fp_log,"\n Time taken for fluid computing cycle = %e s \n",omp_get_wtime()-start);
/* Fluid marker consuming cycle */
start=omp_get_wtime();
for (mm1=0;mm1<marknum1;mm1++)
{
/* Marker type */
mm2=(int)markt[mm1]; if (mm2>=100) mm2-=100;
// What use? since will not use mm1>100 anyway..
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Change water consuming rocks and fluid makers */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && (markk[mm1]>0 || markt[mm1]>=50) && markt[mm1]<100)
if((markd[mm1])>=0 && (markw[mm1])>=0 && mm2>1 && mm2!=9 && mm2!=10 && mm2!=12 && mm2!=14 && mm2!=5 && mm2!=6)
{
// For all assimilating rock types: 0-50, except those one line above
if(mm2<50)
{
/* P, T parameters calc */
// Why need to do this every time again?
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m1,m2);
mtk=markk[mm1];
/* Thermodynamic database use for Ro, Water */
/* Compute TD variables */
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m1,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
mwa=Mwa;
mro=Mro;
/* Water change */
dmwa=mwa-markw[mm1];
/* Add water changes to the current cell, kg/m3 */
/* Water consuming */
if(dmwa>0)
{
if (wa1[m3]<=wa0[m3])
{
/* Save complete new water content */
markw[mm1]=mwa;
}
else
{
/* Compute, Save partial new water content scaled by available free water */
markw[mm1]=markw[mm1]+dmwa*wa0[m3]/wa1[m3];
}
}
}
// For all fluid markers: 50-100
else
/* Fluid marker change */
{
// Evaluate whether all free water is finished
if(wa1[m3]<wa0[m3])
{
/* Count water changes for fluid marker */
markw[mm1]*=1.0-wa1[m3]/wa0[m3];
}
else
/* Erase fluid marker */
{
markx[mm1]=-1.0;
markk[mm1]=0;
}
}
}
}
/* Reset additional markers: compact erased slots using the appended markers */
fprintf(fp_log,"\n WATER BEG Number of markers: OLD = %ld NEW = %ld \n",marknum,marknum1); fflush(fp_log);
mm1=0;
while(marknum1>marknum && mm1<marknum)
{
/* Reload marker */
if((markx[mm1]<0 || marky[mm1]<0 || (markx[mm1])>xsize || (marky[mm1])>ysize) && markt[mm1]<100)
{
/* Decrease additional markers counter */
marknum1--;
/* BUGFIX: the old code had a stray ';' after this if, so the copy below ran
   unconditionally and could move an erased marker into a live slot */
if(markx[marknum1]>=0)
{
/* Type save */
markt[mm1]=markt[marknum1];
/* X,Y, water reload */
markx[mm1]=markx[marknum1];
marky[mm1]=marky[marknum1];
markw[mm1]=markw[marknum1];
markd[mm1]=markd[marknum1];
markk[mm1]=markk[marknum1];
/* Slot refilled: advance to the next slot */
mm1++;
}
/* Else: candidate marker was erased too; retry this slot with the next candidate */
}
else
{
/* Slot holds a valid marker: advance */
mm1++;
}
}
fprintf(fp_log,"\n WATER END Number of markers: OLD = %ld NEW = %ld \n",marknum,marknum1); fflush(fp_log);
/* Set new marker number */
marknum=marknum1;
return 0;
}
/* End OMP Hydration front progress after H2O budget */
/* Erosion Surface progress */
/* Advances the erosion surface ep[0..xnumx-1] and the initial surface
   ep[xnumx..2*xnumx-1] over one global timestep, sub-stepping with a CFL-style
   limit derived from the surface material velocity. Restores the global
   timestep on exit. */
void erosion()
{
/* Val buffer */
double v0,v1,dydx,x1,vx1,vy1,dy;
double ertimesum,ertimesum0;
long int m1,m2;
/**/
/* Erosion Solution Cycle ------------------------------------------ */
/* ertimesum accumulates sub-steps until the full timestep ertimesum0 is covered */
ertimesum=0;
ertimesum0=timestep;
do
{
/* Save old cycle results */
for (m1=0;m1<xnumx;m1++)
{
ep0[m1]=ep[m1];
ep0[xnumx+m1]=ep[xnumx+m1];
}
/**/
/**/
/**/
/* Initial timestep definition: at most the remaining part of the global step */
timestep=ertimesum0-ertimesum;
/**/
/**/
/**/
/* Erosion timestep definition using material velocity field */
for (m1=0;m1<xnumx;m1++)
{
/* Calc horizontal Coordinate */
x1=gx[m1];
/**/
/* EROSION SURFACE */
/* Calc material velocity on the Surface using velocity field */
/* allinteri fills the global eps[] buffer; eps[11]/eps[12] are Vx/Vy here */
allinteri(x1,ep0[m1]);
vx1=eps[11];
vy1=eps[12];
/* Check horizontal timestep */
/* NOTE(review): dydx is reset here but never used in this loop -
   only the timestep limits below matter; presumably leftover code */
dydx=0;
if(vx1>0 && m1>0)
{
timestep=MINV(timestep,(gx[m1]-gx[m1-1])/vx1);
/*
fprintf(fp_log,"111 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1-1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
if(vx1<0 && m1<xnumx-1)
{
timestep=MINV(timestep,(gx[m1]-gx[m1+1])/vx1);
/*
fprintf(fp_log,"222 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1+1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
/* Check vertical timestep */
if(vy1)
{
/* Horizontal line num definition */
m2=m2serch(ep0[m1]);
/* Check timestep */
timestep=MINV(timestep,(gy[m2+1]-gy[m2])/ABSV(vy1));
/*
fprintf(fp_log,"333 %ld %e %e %e %e %e %e %e",m2,vx1,vy1,(gy[m2+1]-gy[m2])/ABSV(vy1),ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
/**/
/**/
/* INITIAL SURFACE */
/* Calc material velocity on the Initial Surface using velocity field */
allinteri(x1,ep0[xnumx+m1]);
vx1=eps[11];
vy1=eps[12];
/* Check horizontal timestep */
/* Same CFL-style limits as above, evaluated on the initial surface */
dydx=0;
if(vx1>0 && m1>0)
{
timestep=MINV(timestep,(gx[m1]-gx[m1-1])/vx1);
/*
fprintf(fp_log,"444 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1-1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
if(vx1<0 && m1<xnumx-1)
{
timestep=MINV(timestep,(gx[m1]-gx[m1+1])/vx1);
/*
fprintf(fp_log,"555 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1+1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
/* Check vertical timestep */
if(vy1)
{
/* Horizontal line num definition */
m2=m2serch(ep0[xnumx+m1]);
/* Check timestep */
timestep=MINV(timestep,(gy[m2+1]-gy[m2])/ABSV(vy1));
/*
fprintf(fp_log,"666 %ld %e %e %e %e %e %e %e",m2,vx1,vy1,(gy[m2+1]-gy[m2])/ABSV(vy1),ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
}
/*
fprintf(fp_log,"777 %e %e %e %e",ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
/**/
/**/
/**/
/* Displace Surface boundary */
/*
for (m1=1;m1<xnumx-1;m1++)
*/
for (m1=0;m1<xnumx;m1++)
{
/* EROSION SURFACE */
/* Calculation of erosion rate */
/* Erosion acts above eroslev: constant rate plus elevation-proportional term */
v0=0;
if(ep0[m1]<eroslev)
{
v0=eroscon+eroskoe*(eroslev-ep0[m1]);
}
/* Calculation of sedimentation rate */
/* Sedimentation acts below sedilev (note y grows downward) */
v1=0;
if(ep0[m1]>sedilev)
{
v1=sedicon+sedikoe*(ep0[m1]-sedilev);
}
/* Calc horizontal Coordinate */
x1=gx[m1];
/**/
/* Calc material velocity on the Surface using velocity field */
allinteri(x1,ep0[m1]);
vx1=eps[11];
vy1=eps[12];
/**/
/* Erase erosion/sedimentation rate for marginal points */
if((m1==0 && vx1>0) || (m1==xnumx-1 && vx1<0)) v0=v1=0;
/**/
/* Calc x derivative of y position of the Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
dydx=(ep0[m1]-ep0[m1-1])/(gx[m1]-gx[m1-1]);
/*
fprintf(fp_log,"AAA %e %e",ep0[m1],dydx);getchar();
*/
}
if(vx1<0 && m1<xnumx-1)
{
dydx=(ep0[m1+1]-ep0[m1])/(gx[m1+1]-gx[m1]);
/*
fprintf(fp_log,"BBB %e %e",ep0[m1],dydx);getchar();
*/
}
/* Recalc new Surface position */
/* Advection (vy1 - dydx*vx1) plus erosion (v0) minus sedimentation (v1) */
ep[m1]+=timestep*(v0-v1+vy1-dydx*vx1);
/*
fprintf(fp_log,"SURFACE %ld %e %e %e %e %e %e %e %e",m1,x1,v0,v1,vx1,vy1,dydx,ep[m1]);getchar();
*/
/**/
/**/
/**/
/* INITIAL SURFACE */
/* Initial surface displacement */
/* Calc material velocity on the Surface using velocity field */
allinteri(x1,ep0[xnumx+m1]);
vx1=eps[11];
vy1=eps[12];
/* Calc x derivative of y position of Initial Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
dydx=(ep0[xnumx+m1]-ep0[xnumx+m1-1])/(gx[m1]-gx[m1-1]);
/*
fprintf(fp_log,"AAA %e ",dydx);getchar();
fprintf(fp_log,"AAA %e ",dydx);getchar();
*/
}
if(vx1<0 && m1<xnumx-1)
{
dydx=(ep0[xnumx+m1+1]-ep0[xnumx+m1])/(gx[m1+1]-gx[m1]);
/*
fprintf(fp_log,"BBB %e ",dydx);getchar();
*/
}
/* Recalc new Initial Surface position */
/* The initial surface is advected only (no erosion/sedimentation) */
ep[xnumx+m1]+=timestep*(vy1-dydx*vx1);
/**/
}
/**/
/**/
/**/
/**/
/* Relax EROSION surface */
/* NOTE(review): always-true (0==0) acts as a hard-coded on/off toggle for
   the slope relaxation below */
if (0==0)
for (m1=0;m1<xnumx-1;m1++)
{
/* Calc x derivative of y position */
dydx=(ep[m1+1]-ep[m1])/(gx[m1+1]-gx[m1]);
/* Relax surface for critical slope */
/* If the slope exceeds +-slopemax, redistribute elevation between the two
   nodes so the segment slope is clipped exactly at the critical value */
if(dydx>slopemax)
{
dy=((ep[m1+1]-ep[m1])-slopemax*(gx[m1+1]-gx[m1]))/2.0;
ep[m1] +=dy;
ep[m1+1]-=dy;
/*
dydx=(ep[m1+1]-ep[m1])/(gx[m1+1]-gx[m1]);
fprintf(fp_log,"AAA %ld %e %e",m1,slopemax,dydx);getchar();
*/
}
if(dydx<-slopemax)
{
dy=((ep[m1+1]-ep[m1])+slopemax*(gx[m1+1]-gx[m1]))/2.0;
ep[m1] +=dy;
ep[m1+1]-=dy;
/*
dydx=(ep[m1+1]-ep[m1])/(gx[m1+1]-gx[m1]);
fprintf(fp_log,"BBB %ld %e %e",m1,slopemax,dydx);getchar();
*/
}
}
/**/
/**/
/**/
/* Add Erosion step */
ertimesum+=timestep;
/**/
/**/
/**/
/* Print Results */
if (printmod) { fprintf(fp_log,"\n EROSION STEP = %e YEARS EROSION TIME = %e YEARS \n",timestep/3.15576e+7,ertimesum/3.15576e+7); fflush(fp_log); }
}
while(ertimesum<ertimesum0);
/* Restore timestep */
timestep=ertimesum0;
}
/* Erosion Surface progress */
/* Thermodynamic database use for ro, Cp */
// Within a loop over all markers, do:
// Interpolation properties between four nearest points in thermodynamic database dep. on T,P,composition
void tdbasecalcomp(double x, double y, double mtk, double mpb, int mm2, long int mm1, long int m10, double *Mgg, double *Mro, double *Mwa, double *Mcp, double *Mbb, double *Maa, double *Mdhh, double *Mkt)
{
/* Interpolate marker material properties from the tabulated thermodynamic
   database td[TKindex][PBindex][base][var] as a bilinear function of marker
   temperature mtk (K) and pressure mpb (bar).
   Inputs:
     x,y   - marker coordinates (used only for the pore-fluid correction)
     mtk   - marker temperature, K
     mpb   - marker pressure, bar
     mm2   - marker rock type (mapped to database column mm3 by the switches)
     mm1   - marker number (error reporting only)
     m10   - X-node index left of the marker (erosion-surface lookup)
   Outputs (via pointers):
     Mgg - shear modulus, Mro - density, Mwa - water wt%, Mcp - heat capacity,
     Mbb - effective adiabatic term, Maa - effective compressibility term,
     Mdhh - enthalpy difference vs Pmin, Mkt - thermal conductivity.
   NOTE(review): in the shallow branch *Mkt is INCREMENTED (+=krad) and *Mdhh,
   *Mkt are not zeroed below — presumably the caller pre-loads *Mkt with the
   lattice conductivity before calling; confirm against the call site. */
/* TD Database variables, dTK,dPB - TK, PB step for tabulation in TD database */
double H0,H1,H2,H3,R0,R1,R2,R3,G0,G1,G2,G3,W0,W1,W2,W3,n,e;
/* Val Buffers */
int n1,n2,mm3,ynpb;
double mhh0,mhh1,mdhh,maa,mwa,dmwa,wro,mro,mcp,mbb,mgg,mkt,mkt1,pbmax,xold,kr01,kr1,kr10,xkr,krad;
/* m1 is currently unused (kept for historical reasons) */
long int m1=m10;
double sy1,e1;
/* Maximal pressure for the shallow database */
pbmax=pbmin+pbstp*(double)(pbnum-1);
/* Adiabate computing */
/* Disabled test branch (1==0): when enabled it rescales pressure during the
   first 1000 yr and forces the deep-database path via ynpb=1 */
ynpb=0; if(1==0 && timesum<3.15576e+7*1e+3) {fprintf(fp_log,"in adiabate: can not right ? \n"); fflush(fp_log); mpb*=timesum/(3.15576e+7*1e+3); ynpb=1;}
/* Reset TD variables */
/* NOTE: *Mdhh and *Mkt are deliberately not reset here (see header note) */
*Mgg=*Mro=*Mwa=*Mcp=*Mbb=*Maa=0;
/* Thermal conductivity */
/* m895 Dry peridotite Fe=12 */
/* Olivine: Hoffmeister, 1999; Hoffmeister & Yuen, 2005 */
if(mpb<235000.0)
{
/* Lattice k */
mkt1=(1.878+770.9/MINV(mtk,1200.0))*(1.0+4.26e-6*mpb);
/* Radiative k 0.1 mm */
kr01=pow(mtk/4000.0,3.0);
/* Radiative k 1 mm */
kr1=pow(mtk/1774.0,3.0);
/* Radiative k 10 mm */
xkr=pow(mtk/1636.0,10.0);
xkr/=xkr+1.0; kr10=pow((mtk-1000.0*xkr)/1011.0,3.0)-0.7713*xkr;
}
/* Perovskite: Hoffmeister, 1999; Hoffmeister & Yuen, 2005 */
else
{
/* Lattice k */
mkt1=(1.291+1157.0/MINV(mtk,2100.0))*(1.0+2.50e-6*mpb);
/* Radiative k 0.1 mm */
kr01=pow(mtk/3591.0,3.0);
/* Radiative k 1 mm */
kr1=pow(mtk/2117.0,3.0);
/* Radiative k 10 mm */
xkr=pow(mtk/1500.0,4.0); xkr/=xkr+1.0;
kr10=pow((mtk+4000.0*xkr)/5776.0,3.0)+2.822*xkr;
}
/* Grain size choice: only the 1 mm radiative conductivity is used below;
   kr01 (0.1 mm) and kr10 (10 mm) are computed but currently unused */
krad=kr1;
/* Shallow TD base type */
if(mpb<pbmax && ynpb==0)
{
/* TD base type */
/* Map marker rock type mm2 to shallow-database column mm3 */
switch (mm2)
{
/* Dry Upper crust */
case 5: mm3=11; break;
/* Wet Upper crust */
case 17: mm3=12; break;
/* Dry Lower crust */
case 6: mm3=13; break;
/* Wet Lower crust */
case 18: mm3=14; break;
/* Sediments */
case 2:
case 3:
case 4: mm3=5; break;
/* Molten Sediments */
case 37:
case 25:
case 22:
case 23:
case 24: mm3=6; break;
/* Basalt */
case 16:
case 7: mm3=7; break;
/* Molten Basalt */
case 36:
case 27: mm3=8; break;
/* Gabbro */
case 38:
case 26:
case 8: mm3=3; break;
/* Molten Gabbro */
case 28: mm3=4; break;
/* Dry peridotite */
case 9:
case 12:
case 14:
case 10: mm3=0; break;
/* Wet peridotite */
case 13:
case 11: mm3=1; break;
/* Molten peridotite */
case 34: mm3=2; break;
/* Unknown type */
default: {fprintf(fp_log,"Shallow TD: Unknown rock type for TD database %d, for marker %ld with T= %f, P=%f \n",mm2,mm1,mtk,mpb); fflush(fp_log); exit(0);}
}
/* ABCD-4Cell Number */
// Get weights for nearest points in thermodynamic database
/* e,n are first clamped fractional table positions, then reduced to the
   local 0..1 offsets within the (n1,n2) table cell */
e=(mtk-tkmin)/tkstp;
if(e<0) e=0;
if(e>(double)(tknum-1)) e=(double)(tknum-1);
n=(mpb-pbmin)/pbstp;
if(n<0) n=0;
if(n>(double)(pbnum-1)) n=(double)(pbnum-1);
n1=(int)(e);
if(n1>tknum-2) n1=tknum-2;
n2=(int)(n);
if(n2>pbnum-2) n2=pbnum-2;
/* e,n Calc */
e=(e-(double)(n1));
n=(n-(double)(n2));
/* Ro H values */
/* 0 2 */
/* 1 3 */
/* Table layout per cell: index 0 = density (g/cm3 -> kg/m3 via *1000),
   1 = enthalpy (kcal -> J via *1000*4.1837), 3 = Vs-like velocity used to
   build shear modulus G = v^2 * ro, 4 = water wt% */
R0=td[n1 ][n2 ][mm3][0]*1000.0;
R1=td[n1 ][n2+1][mm3][0]*1000.0;
R2=td[n1+1][n2 ][mm3][0]*1000.0;
R3=td[n1+1][n2+1][mm3][0]*1000.0;
H0=td[n1 ][n2 ][mm3][1]*1000.0*4.1837;
H1=td[n1 ][n2+1][mm3][1]*1000.0*4.1837;
H2=td[n1+1][n2 ][mm3][1]*1000.0*4.1837;
H3=td[n1+1][n2+1][mm3][1]*1000.0*4.1837;
W0=td[n1 ][n2 ][mm3][4];
W1=td[n1 ][n2+1][mm3][4];
W2=td[n1+1][n2 ][mm3][4];
W3=td[n1+1][n2+1][mm3][4];
G0=td[n1 ][n2 ][mm3][3]*1000.0;G0*=G0*R0;
G1=td[n1 ][n2+1][mm3][3]*1000.0;G1*=G1*R1;
G2=td[n1+1][n2 ][mm3][3]*1000.0;G2*=G2*R2;
G3=td[n1+1][n2+1][mm3][3]*1000.0;G3*=G3*R3;
/* Shear modulus calc by interpolation */
mgg=((G0*(1.0-n)+G1*n)*(1.0-e)+(G2*(1.0-n)+G3*n)*e);
/* Ro calc by interpolation */
mro=((R0*(1.0-n)+R1*n)*(1.0-e)+(R2*(1.0-n)+R3*n)*e);
/* Water wt% calc by interpolation */
mwa=((W0*(1.0-n)+W1*n)*(1.0-e)+(W2*(1.0-n)+W3*n)*e);
/* Add pore fluid */
/* Erosion surface */
/* sy1 = depth of the marker below the erosion surface ep[], obtained by
   linear interpolation of ep between the bracketing X nodes */
e1=(x-gx[m10])/(gx[m10+1]-gx[m10]);
sy1=y-(e1*ep[m10+1]+(1.0-e1)*ep[m10]);
/* Pore water added only for rock types with marks0>0, within zmpor of the
   surface and below the cutoff temperature tkpor; density is reduced by
   mixing with water (wro) */
if(marks0[mm2]>0 && sy1>0 && sy1<zmpor && mtk<tkpor)
{
dmwa=marks0[mm2]*(tkpor-mtk)/(tkpor-273.15)*(zmpor-sy1)/zmpor;
mwa+=dmwa;
wro=1050.0;
mro=mro/(1.0+dmwa*1e-2*(mro/wro-1.0));
}
/* Cp calc by interpolation */
/* Cp = dH/dT from finite differences across the table cell, clamped */
mcp=((H2-H0)*(1.0-n)+(H3-H1)*n)/tkstp;
if(mcp<1e+2) mcp=1e+2; else if(mcp>5e+4) mcp=5e+4;
/* Effective adiabatic betta=1/V*dV/dT=ro/T*[-dH/dP+V] calc by interpolation */
mbb=(2.0/(R1+R0)-(H1-H0)/pbstp/1e+5)*(1.0-e)+(2.0/(R3+R2)-(H3-H2)/pbstp/1e+5)*e;
mbb*=mro/mtk;
if(mbb<-1e-2) mbb=-1e-2; else if(mbb>1e-2) mbb=1e-2;
/* Effective compressibility term alpha=1/ro*d(ro)/dP calc by interpolation */
maa=(2.0/(R1+R0)*(R1-R0)*(1.0-e)+2.0/(R3+R2)*(R3-R2)*e)/pbstp/1e+5;
if(maa<0) maa=0;
/* Activation enthalpy recalc using enthalpy changes */
/* Current Enthalpy */
mhh1=((H0*(1.0-n)+H1*n)*(1.0-e)+(H2*(1.0-n)+H3*n)*e);
/* Pmin Enthalpy */
mhh0=(td[n1][0 ][mm3][1]*(1.0-e) + td[n1+1][0 ][mm3][1]*e)*1000.0*4.1837;
/* Enthalpy Difference calc */
mdhh=(mhh1-mhh0);
/* Save TD variables */
*Mgg=mgg;
*Mro=mro;
*Mwa=mwa;
*Mcp=mcp;
*Mbb=mbb;
*Maa=maa;
*Mdhh=mdhh;
/* Only the radiative part is added here; see header note about *Mkt */
*Mkt+=krad;
}
/* Deep TD base type */
/* Entered above 0.75*pbmax so that an overlap window [0.75*pbmax, pbmax]
   exists in which shallow and deep values can be blended below */
if(1==0 || mpb>0.75*pbmax || ynpb==1)
{
/* Map marker rock type mm2 to deep-database column mm3 */
switch (mm2)
{
/* MORB DATABASE */
/* UPPER, LOWER Crust */
case 5:
case 6:
case 17:
case 18:
case 37:
case 38:
/* Sediments */
case 2:
case 3:
case 4:
/* Molten Sediments */
case 22:
case 23:
case 24:
/* Molten crust */
case 25:
case 26:
/* Basalt */
case 16:
case 7:
/* Molten Basalt */
case 36:
case 27:
/* Gabbro */
case 8:
/* Molten Gabbro */
case 28: mm3=10; break;
/**/
/* PIROLITE DATABASE */
/* Dry peridotite */
case 9:
case 12:
case 14:
case 10:
/* Wet peridotite */
case 13:
case 11:
/* Molten peridotite */
case 34: mm3=9; break;
// Added missing rock types
/* NOTE(review): cases 15,19,20,21,29,30 fall through to the error exit —
   presumably intentional (these types must never reach the deep database);
   confirm */
case 15:
case 19:
case 20:
case 21:
case 29:
case 30:
/* Unknown type */
default: {fprintf(fp_log,"Deep TD: Unknown rock type for TD database %d, for marker %ld with T= %f, P=%f \n",mm2,mm1,mtk,mpb); fflush(fp_log); exit(0);}
}
/* ABCD-4Cell Number */
/* Same clamped table indexing as the shallow branch, with the deep-table
   grid parameters (tkmin1/tkstp1/tknum1, pbmin1/pbstp1/pbnum1) */
e=(mtk-tkmin1)/tkstp1;
if(e<0) e=0;
if(e>(double)(tknum1-1)) e=(double)(tknum1-1);
n=(mpb-pbmin1)/pbstp1;
if(n<0) n=0;
if(n>(double)(pbnum1-1)) n=(double)(pbnum1-1);
n1=(int)(e);
if(n1>tknum1-2) n1=tknum1-2;
n2=(int)(n);
if(n2>pbnum1-2) n2=pbnum1-2;
/* e,n Calc */
e=(e-(double)(n1));
n=(n-(double)(n2));
/* Ro H values */
/* 0 2 */
/* 1 3 */
R0=td[n1 ][n2 ][mm3][0]*1000.0;
R1=td[n1 ][n2+1][mm3][0]*1000.0;
R2=td[n1+1][n2 ][mm3][0]*1000.0;
R3=td[n1+1][n2+1][mm3][0]*1000.0;
H0=td[n1 ][n2 ][mm3][1]*1000.0*4.1837;
H1=td[n1 ][n2+1][mm3][1]*1000.0*4.1837;
H2=td[n1+1][n2 ][mm3][1]*1000.0*4.1837;
H3=td[n1+1][n2+1][mm3][1]*1000.0*4.1837;
W0=td[n1 ][n2 ][mm3][4];
W1=td[n1 ][n2+1][mm3][4];
W2=td[n1+1][n2 ][mm3][4];
W3=td[n1+1][n2+1][mm3][4];
G0=td[n1 ][n2 ][mm3][3]*1000.0;G0*=G0*R0;
G1=td[n1 ][n2+1][mm3][3]*1000.0;G1*=G1*R1;
G2=td[n1+1][n2 ][mm3][3]*1000.0;G2*=G2*R2;
G3=td[n1+1][n2+1][mm3][3]*1000.0;G3*=G3*R3;
/* Shear modulus calc by interpolation */
mgg=((G0*(1.0-n)+G1*n)*(1.0-e)+(G2*(1.0-n)+G3*n)*e);
/* Ro calc by interpolation */
mro=((R0*(1.0-n)+R1*n)*(1.0-e)+(R2*(1.0-n)+R3*n)*e);
/* Water wt% calc by interpolation */
/* Deep database carries no free water */
mwa=0;
/* Water in crystals */
/* Fixed 0.1 wt% structurally bound water for non-dry-peridotite types at
   P < 235 kbar; density corrected by mass-balance mixing with water */
if(mm2!=9 && mm2!=10 && mm2!=14 && mpb<235000.0)
{
dmwa=0.1;
mwa+=dmwa;
wro=1050.0;
mro=100.0/((100.0-dmwa)/mro+dmwa/wro);
}
/* Cp calc by interpolation */
mcp=((H2-H0)*(1.0-n)+(H3-H1)*n)/tkstp1;
if(mcp<1e+2) mcp=1e+2; else if(mcp>5e+4) mcp=5e+4;
/* Effective adiabatic betta=1/V*dV/dT=ro/T*[-dH/dP+V] calc by interpolation */
mbb=(2.0/(R1+R0)-(H1-H0)/pbstp1/1e+5)*(1.0-e)+(2.0/(R3+R2)-(H3-H2)/pbstp1/1e+5)*e;
mbb*=mro/mtk;
if(mbb<-1e-2) mbb=-1e-2; else if(mbb>1e-2) mbb=1e-2;
/* Effective compressibility term alpha=1/ro*d(ro)/dP calc by interpolation */
maa=(2.0/(R1+R0)*(R1-R0)*(1.0-e)+2.0/(R3+R2)*(R3-R2)*e)/pbstp1/1e+5;
if(maa<0) maa=0;
/* Activation enthalpy recalc using enthalpy changes */
/* Current Enthalpy */
mhh1=((H0*(1.0-n)+H1*n)*(1.0-e)+(H2*(1.0-n)+H3*n)*e);
/* Pmin Enthalpy */
mhh0=(td[n1][0 ][mm3][1]*(1.0-e) + td[n1+1][0 ][mm3][1]*e)*1000.0*4.1837;
/* Enthalpy Difference calc */
mdhh=(mhh1-mhh0);
/* Thermal conductivity */
/* Deep branch uses lattice + radiative conductivity */
mkt=mkt1+krad;
/* Computing transitional parameters */
if(1==0 || mpb>pbmax || ynpb==1)
// NOTE(review): another code variant ("Manny") reportedly uses 1==1 here,
// which would disable the blending branch below — confirm which is intended
{
/* Save TD variables */
/* Above pbmax: deep-database values used directly */
*Mgg=mgg;
*Mro=mro;
*Mwa=mwa;
*Mcp=mcp;
*Mbb=mbb;
*Maa=maa;
*Mdhh=mdhh;
*Mkt=mkt;
}
else
{
/* Transitional window 0.75*pbmax < mpb < pbmax: blend deep values with the
   shallow values already stored in *M... by the shallow branch above
   (xold = weight of the shallow values, 1 at 0.75*pbmax, 0 at pbmax) */
xold=(pbmax-mpb)/(0.25*pbmax);
/* Save TD variables */
mgg=mgg*(1.0-xold)+ *Mgg *xold;
mro=mro*(1.0-xold)+ *Mro *xold;
mwa=mwa*(1.0-xold)+ *Mwa *xold;
mcp=mcp*(1.0-xold)+ *Mcp *xold;
mbb=mbb*(1.0-xold)+ *Mbb *xold;
maa=maa*(1.0-xold)+ *Maa *xold;
mdhh=mdhh*(1.0-xold)+ *Mdhh *xold;
mkt=mkt*(1.0-xold)+ *Mkt *xold;
*Mgg=mgg;
*Mro=mro;
*Mwa=mwa;
*Mcp=mcp;
*Mbb=mbb;
*Maa=maa;
*Mdhh=mdhh;
*Mkt=mkt;
}
}
}
/* End OMP Thermodynamic database use for ro, Cp */
// *** Interpolation routines using the following nodal locations ***
/* Staggered Nodes num */
/* [0] [3] [6] */
/* T0,xy0 Vy0 T3,xy3 Vy3 */
/* */
/* Vx0 P4,xx4,yy4 Vx3 P7,xx7,yy7 */
/* */
/* [1] [4] [7] */
/* T,xy1 Vy1 T4,xy4 Vy4 */
/* */
/* Vx1 P5,xx5,yy5 Vx4 P8,xx8,yy8 */
/* */
/* [2] [5] [8] */
/* */
/* */
/* Weights for horizontal and vertical nodes calculation for marker interpolation */
void nodewt(long int m1min, long int m1max, long int m2min, long int m2max, double x, double y, int ynx, int yny)
/* m1min,m1max,m2min,m2max - node X,Y number limits */
/* x,y - current point coordinates */
/* ynx,yny - type of node shift: none (0), backward (<0), forward (>0) */
{
/* Loop counter */
long int i;
/* Highest valid index in xn[] for each direction */
int nx,ny;
/**/
/* --- Horizontal direction --- */
/* Load X positions of the (possibly staggered) nodes into xn[] */
for (i=m1min;i<=m1max;i++)
{
if(ynx<0) xn[i-m1min]=(gx[i]+gx[i-1])/2.0;
else if(ynx>0) xn[i-m1min]=(gx[i]+gx[i+1])/2.0;
else xn[i-m1min]=gx[i];
}
/* Maximal position in xn[] */
nx=(int)(m1max-m1min);
/* Interpolation coefficients for the horizontal direction */
fdweight(nx,0,x);
/* Save horizontal coefficients into the second column of cn[] */
for (i=0;i<=nx;i++)
{
cn[i][1]=cn[i][0];
}
/**/
/* --- Vertical direction --- */
/* Load Y positions of the (possibly staggered) nodes into xn[] */
for (i=m2min;i<=m2max;i++)
{
if(yny<0) xn[i-m2min]=(gy[i]+gy[i-1])/2.0;
else if(yny>0) xn[i-m2min]=(gy[i]+gy[i+1])/2.0;
else xn[i-m2min]=gy[i];
}
/* Maximal position in xn[] */
ny=(int)(m2max-m2min);
/* Interpolation coefficients for the vertical direction (left in cn[][0]) */
fdweight(ny,0,y);
}
/* End Weights for horizontal and vertical nodes calculation for marker interpolation */
/* Calculation of EE,VX,VY,ESP, and PR by Interpolation */
void allinteriomp(double x, double y, long int m10, long int m20, double *VX, double *VY, double *PR, double *ESP, double *EE)
/* x,y - XY location of the point to interpolate to */
/* m10,m20 - upper-left node X,Y indices of the point's cell */
/* Outputs: VX,VY - velocity; PR - pressure; ESP - spin rate;
   EE - second invariant of the strain rate tensor */
{
/* Counters */
long int m1,m2,m3;
/* en-NormalisedDistance */
// EXX and EXY stay local: only the invariant *EE is returned.
// (removed unused local `ival`)
double e,n,xrat,EXX,EXY;
/**/
/**/
/* Clamp the point into the model domain */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Weighting between near (staggered) and far nodes for velocity;
   pure near-node interpolation (xrat=1) at the domain edges */
xrat=2.0/3.0;
if(x<(gx[0]+gx[1])/2.0) xrat=1.0;
if(x>(gx[xnumx-2]+gx[xnumx-1])/2.0) xrat=1.0;
if(y<(gy[0]+gy[1])/2.0) xrat=1.0;
if(y>(gy[ynumy-2]+gy[ynumy-1])/2.0) xrat=1.0;
/**/
/**/
/**/
// Store the cell indices for re-use by each sub-interpolation
m1=m10;
m2=m20;
/**/
/**/
/**/
/* EXY, ESP interpolation (basic nodes) ------------------------ */
// Clear buffer
*ESP=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<1) m10=1; if(m10>xnumx-3) m10=xnumx-3;
if(m20<1) m20=1; if(m20>ynumy-3) m20=ynumy-3;
/**/
/* Calc normalized distances */
// Note that the nodal distance is now fixed, while before could change it with intermod. If want that again see old scripts in dynwif/CleanOldRun..
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Bilinear interpolation of EPSxy and SPINxy */
m3=m10*ynumy+m20;
EXY=(1.0-e)*(1.0-n)*exy[m3]+(1.0-e)*n*exy[m3+1]+e*(1.0-n)*exy[m3+ynumy]+e*n*exy[m3+ynumy+1];
*ESP=(1.0-e)*(1.0-n)*esp[m3]+(1.0-e)*n*esp[m3+1]+e*(1.0-n)*esp[m3+ynumy]+e*n*esp[m3+ynumy+1];
/* End EXY, ESP interpolation ------------------------ */
/**/
/**/
/**/
/* Exx, P interpolation (cell-center nodes) ------------------------ */
// Reset and clear buffers
m10=m1;
m20=m2;
*EE=0;
*PR=0;
*VX=0;
*VY=0;
/* Horizontal,Vertical limits for interpolation calc */
if(x>(gx[m10]+gx[m10+1])/2.0) m10++;
if(y>(gy[m20]+gy[m20+1])/2.0) m20++;
if(m10<1) m10=1; if(m10>xnumx-2) m10=xnumx-2;
if(m20<1) m20=1; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances (relative to cell centers) */
e=(x-(gx[m10-1]+gx[m10])/2.0)/((gx[m10+1]-gx[m10-1])/2.0);
n=(y-(gy[m20-1]+gy[m20])/2.0)/((gy[m20+1]-gy[m20-1])/2.0);
/* Interpolation ------------------------ */
m3=m10*ynumy+m20;
EXX=(1.0-e)*(1.0-n)*exx[m3]+(1.0-e)*n*exx[m3+1]+e*(1.0-n)*exx[m3+ynumy]+e*n*exx[m3+ynumy+1];
// QUESTION TARAS why Interpolate pressure here, do already in interp or d? Now I do port it back, so rm if no need ...
*PR=(1.0-e)*(1.0-n)*pr[m3]+(1.0-e)*n*pr[m3+1]+e*(1.0-n)*pr[m3+ynumy]+e*n*pr[m3+ynumy+1];
// Include small weight (1-xrat) from farther away nodes for velocities
*VX=( (1.0-e)*(1.0-n)*(vx[m3-1]+vx[m3-ynumy-1])+(1.0-e)*n*(vx[m3]+vx[m3-ynumy]) +e*(1.0-n)*(vx[m3+ynumy-1]+vx[m3-1])+e*n*(vx[m3+ynumy]+vx[m3]) ) * 0.5*(1.0-xrat);
*VY=( (1.0-e)*(1.0-n)*(vy[m3-ynumy]+vy[m3-ynumy-1])+(1.0-e)*n*(vy[m3-ynumy+1]+vy[m3-ynumy]) +e*(1.0-n)*(vy[m3]+vy[m3-1])+e*n*(vy[m3+1]+vy[m3]) ) * 0.5*(1.0-xrat);
// Calculate second invariant (sqrt instead of pow(...,0.5))
*EE=sqrt(EXX*EXX+EXY*EXY);
/* End Exx, P interpolation ------------------------ */
/**/
/**/
/**/
/* Vx interpolation (Vx nodes) ------------------------ */
// Reset indices
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc */
if(y<(gy[m20]+gy[m20+1])/2.0) m20-=1;
if(m10<0) m10=0; if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0; if(m20>ynumy-3) m20=ynumy-3;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-(gy[m20]+gy[m20+1])/2.0)/((gy[m20+2]-gy[m20])/2.0);
/* Vx interpolation ------------------------ */
m3=m10*ynumy+m20;
*VX+=((1.0-e)*(1.0-n)*vx[m3]+(1.0-e)*n*vx[m3+1]+e*(1.0-n)*vx[m3+ynumy]+e*n*vx[m3+ynumy+1])*xrat;
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation (Vy nodes) ------------------------ */
// Reset indices
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc */
if(x<(gx[m10]+gx[m10+1])/2.0) m10-=1;
if(m10<0) m10=0; if(m10>xnumx-3) m10=xnumx-3;
if(m20<0) m20=0; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-(gx[m10]+gx[m10+1])/2.0)/((gx[m10+2]-gx[m10])/2.0);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Vy interpolation ------------------------ */
m3=m10*ynumy+m20;
*VY+=((1.0-e)*(1.0-n)*vy[m3]+(1.0-e)*n*vy[m3+1]+e*(1.0-n)*vy[m3+ynumy]+e*n*vy[m3+ynumy+1])*xrat;
/* End Vy interpolation ------------------------ */
/**/
/**/
/**/
}
/* OMP Interpolate Vx,Vy, EPSxx,EPSyy,EPSxy, SPINxy from surrounding nodes to marker at x,y */
/* OMP Calculation of T,T0 for current location by Interpolation */
void allintertomp(double x, double y, long int m10, long int m20, double *TK, double *TK2)
/* x,y - XY location of the point to interpolate to */
/* m10,m20 - upper-left node X,Y indices of the point's cell */
/* TK - current temperature at the point; TK2 - previous-step temperature */
{
/* Counters */
long int m3;
/* en-NormalizedDistance */
// (removed unused local `ival`)
double e,n;
/* Clamp the point into the model domain */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/* T interpolation ------------------------ */
/* Buffer clear */
*TK=*TK2=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<0) m10=0; if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Bilinear temperature interpolation from the four cell-corner nodes */
m3=m10*ynumy+m20;
*TK=(1.0-e)*(1.0-n)*tk[m3]+(1.0-e)*n*tk[m3+1]+e*(1.0-n)*tk[m3+ynumy]+e*n*tk[m3+ynumy+1];
*TK2=(1.0-e)*(1.0-n)*tk2[m3]+(1.0-e)*n*tk2[m3+1]+e*(1.0-n)*tk2[m3+ynumy]+e*n*tk2[m3+ynumy+1];
/* End T interpolation ------------------------ */
}
/* OMP Calculation of T,T0 for current location by Interpolation */
/* OMP Calculation of P by Interpolation */
double allinterpomp(double x, double y, long int m10, long int m20)
/* x,y - XY location of the point to interpolate to */
/* m10,m20 - upper-left node X,Y indices of the point's cell */
/* Returns: pressure interpolated bilinearly from the four nearest
   cell-center (pressure) nodes */
{
/* Counters */
long int m3;
/* en-Normalized distance */
// (removed dead store `ival=0` - value was overwritten before use)
double ival,e,n;
/* Clamp the point into the model domain */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/* Horizontal,Vertical limits for interpolation calc */
/* Shift to the nearest cell-center node and clamp to valid range */
if(x>(gx[m10]+gx[m10+1])/2.0) m10++;
if(y>(gy[m20]+gy[m20+1])/2.0) m20++;
if(m10<1) m10=1; if(m10>xnumx-2) m10=xnumx-2;
if(m20<1) m20=1; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances (relative to cell centers) */
e=(x-(gx[m10-1]+gx[m10])/2.0)/((gx[m10+1]-gx[m10-1])/2.0);
n=(y-(gy[m20-1]+gy[m20])/2.0)/((gy[m20+1]-gy[m20-1])/2.0);
/* P interpolation ------------------------ */
m3=m10*ynumy+m20;
ival=(1.0-e)*(1.0-n)*pr[m3]+(1.0-e)*n*pr[m3+1]+e*(1.0-n)*pr[m3+ynumy]+e*n*pr[m3+ynumy+1];
/* Return pressure */
return ival;
}
/* OMP End calculation of P by Interpolation */
/* Calculation of SIGij by Interpolation */
void allinterdomp(double x, double y,long int m10, long int m20, double *TK,double *EXY,double *EXYE,double *SXY,double *SXYE,double *EXX,double *SXX,double *PR,double *SXXE,double *SPPE,double *EXXE,double *VX, double *MVX, double *VY, double *MVY)
/* x,y - XY location of the point to interpolate to */
/* m10,m20 - upper-left node X,Y indices of the point's cell */
/* Outputs: TK - temperature; EXY,EXYE,SXY,SXYE - shear strain rates/stresses
   (current/elastic); EXX,SXX,EXXE,SXXE,SPPE - normal components; PR - pressure;
   VX,VY - velocity; MVX,MVY - marker velocity field */
{
/* Counters */
long int m1,m2,m3;
/* en-NormalisedDistance */
// (removed unused local `ival`)
double e,n,xrat;
/**/
/**/
/* Clamp the point into the model domain */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Store Up Left Node X,Y Num for later re-usage */
m1=m10;
m2=m20;
/**/
/**/
/* Weighting between near (staggered) and far nodes for velocity;
   pure near-node interpolation (xrat=1) at the domain edges */
xrat=2.0/3.0;
if(x<(gx[0]+gx[1])/2.0) xrat=1.0;
if(x>(gx[xnumx-2]+gx[xnumx-1])/2.0) xrat=1.0;
if(y<(gy[0]+gy[1])/2.0) xrat=1.0;
if(y>(gy[ynumy-2]+gy[ynumy-1])/2.0) xrat=1.0;
/**/
/**/
/* T interpolation ------------------------ */
/* Buffer clear */
*TK=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<0) m10=0; if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Bilinear T interpolation */
m3=m10*ynumy+m20;
*TK=(1.0-e)*(1.0-n)*tk[m3]+(1.0-e)*n*tk[m3+1]+e*(1.0-n)*tk[m3+ynumy]+e*n*tk[m3+ynumy+1];
/* End T interpolation ------------------------ */
/**/
/**/
/**/
/* SIGxy interpolation (basic nodes) ------------------------ */
// Reset and clear buffers
m10=m1;
m20=m2;
*EXY=*EXYE=*SXY=*SXYE=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<1) m10=1; if(m10>xnumx-3) m10=xnumx-3;
if(m20<1) m20=1; if(m20>ynumy-3) m20=ynumy-3;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/**/
/* EPSxy Interpolate after interpolation weights */
m3=m10*ynumy+m20;
*EXY=(1.0-e)*(1.0-n)*exy[m3]+(1.0-e)*n*exy[m3+1]+e*(1.0-n)*exy[m3+ynumy]+e*n*exy[m3+ynumy+1];
*EXYE=(1.0-e)*(1.0-n)*exye[m3]+(1.0-e)*n*exye[m3+1]+e*(1.0-n)*exye[m3+ynumy]+e*n*exye[m3+ynumy+1];
*SXY=(1.0-e)*(1.0-n)*sxy[m3]+(1.0-e)*n*sxy[m3+1]+e*(1.0-n)*sxy[m3+ynumy]+e*n*sxy[m3+ynumy+1];
*SXYE=(1.0-e)*(1.0-n)*sxye[m3]+(1.0-e)*n*sxye[m3+1]+e*(1.0-n)*sxye[m3+ynumy]+e*n*sxye[m3+ynumy+1];
/* End SIGxy interpolation ------------------------ */
/**/
/**/
/**/
/* SIGxx,SIGyy interpolation (cell-center nodes) ------------------------ */
// Reset and clear buffers
m10=m1;
m20=m2;
*EXX=*SXX=*PR=*SXXE=*SPPE=*EXXE=0;
*VX=*MVX=0;
*VY=*MVY=0;
/* Horizontal,Vertical limits for interpolation calc */
if(x>(gx[m10]+gx[m10+1])/2.0) m10++;
if(y>(gy[m20]+gy[m20+1])/2.0) m20++;
if(m10<1) m10=1; if(m10>xnumx-2) m10=xnumx-2;
if(m20<1) m20=1; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances (relative to cell centers) */
e=(x-(gx[m10-1]+gx[m10])/2.0)/((gx[m10+1]-gx[m10-1])/2.0);
n=(y-(gy[m20-1]+gy[m20])/2.0)/((gy[m20+1]-gy[m20-1])/2.0);
/* P interpolation ------------------------ */
m3=m10*ynumy+m20;
*EXX =(1.0-e)*(1.0-n)*exx[m3]+(1.0-e)*n*exx[m3+1]+e*(1.0-n)*exx[m3+ynumy]+e*n*exx[m3+ynumy+1];
*SXX =(1.0-e)*(1.0-n)*sxx[m3]+(1.0-e)*n*sxx[m3+1]+e*(1.0-n)*sxx[m3+ynumy]+e*n*sxx[m3+ynumy+1];
*PR =(1.0-e)*(1.0-n)*pr[m3]+(1.0-e)*n*pr[m3+1]+e*(1.0-n)*pr[m3+ynumy]+e*n*pr[m3+ynumy+1];
*SPPE=(1.0-e)*(1.0-n)*sppe[m3]+(1.0-e)*n*sppe[m3+1]+e*(1.0-n)*sppe[m3+ynumy]+e*n*sppe[m3+ynumy+1];
*SXXE=(1.0-e)*(1.0-n)*sxxe[m3]+(1.0-e)*n*sxxe[m3+1]+e*(1.0-n)*sxxe[m3+ynumy]+e*n*sxxe[m3+ynumy+1];
*EXXE=(1.0-e)*(1.0-n)*exxe[m3]+(1.0-e)*n*exxe[m3+1]+e*(1.0-n)*exxe[m3+ynumy]+e*n*exxe[m3+ynumy+1];
// Small weight (1-xrat) from the farther-away staggered velocity nodes
*VX=( (1.0-e)*(1.0-n)*(vx[m3-1]+vx[m3-ynumy-1])+(1.0-e)*n*(vx[m3]+vx[m3-ynumy]) +e*(1.0-n)*(vx[m3+ynumy-1]+vx[m3-1])+e*n*(vx[m3+ynumy]+vx[m3]) )*0.5 *(1.0-xrat);
*VY=( (1.0-e)*(1.0-n)*(vy[m3-ynumy]+vy[m3-ynumy-1])+(1.0-e)*n*(vy[m3-ynumy+1]+vy[m3-ynumy]) +e*(1.0-n)*(vy[m3]+vy[m3-1])+e*n*(vy[m3+1]+vy[m3]) )*0.5 *(1.0-xrat);
*MVX=( (1.0-e)*(1.0-n)*(mvx[m3-1]+mvx[m3-ynumy-1])+(1.0-e)*n*(mvx[m3]+mvx[m3-ynumy]) +e*(1.0-n)*(mvx[m3+ynumy-1]+mvx[m3-1])+e*n*(mvx[m3+ynumy]+mvx[m3]) )*0.5 *(1.0-xrat);
*MVY=( (1.0-e)*(1.0-n)*(mvy[m3-ynumy]+mvy[m3-ynumy-1])+(1.0-e)*n*(mvy[m3-ynumy+1]+mvy[m3-ynumy]) +e*(1.0-n)*(mvy[m3]+mvy[m3-1])+e*n*(mvy[m3+1]+mvy[m3]) )*0.5 *(1.0-xrat);
/* End SIGxx,SIGyy interpolation ------------------------ */
/**/
/**/
/**/
/* Vx interpolation (Vx nodes) ------------------------ */
// Reset indices
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc */
if(y<(gy[m20]+gy[m20+1])/2.0) m20-=1;
if(m10<0) m10=0; if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0; if(m20>ynumy-3) m20=ynumy-3;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-(gy[m20]+gy[m20+1])/2.0)/((gy[m20+2]-gy[m20])/2.0);
/* Vx interpolation ------------------------ */
// Main weight (xrat) from the four nearest Vx nodes
m3=m10*ynumy+m20;
*VX+=((1.0-e)*(1.0-n)*vx[m3]+(1.0-e)*n*vx[m3+1]+e*(1.0-n)*vx[m3+ynumy]+e*n*vx[m3+ynumy+1]) *xrat;
*MVX+=((1.0-e)*(1.0-n)*mvx[m3]+(1.0-e)*n*mvx[m3+1]+e*(1.0-n)*mvx[m3+ynumy]+e*n*mvx[m3+ynumy+1]) *xrat;
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation (Vy nodes) ------------------------ */
// Reset indices
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc */
if(x<(gx[m10]+gx[m10+1])/2.0) m10-=1;
if(m10<0) m10=0; if(m10>xnumx-3) m10=xnumx-3;
if(m20<0) m20=0; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-(gx[m10]+gx[m10+1])/2.0)/((gx[m10+2]-gx[m10])/2.0);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Vy interpolation ------------------------ */
// Main weight (xrat) from the four nearest Vy nodes
m3=m10*ynumy+m20;
*VY+=((1.0-e)*(1.0-n)*vy[m3]+(1.0-e)*n*vy[m3+1]+e*(1.0-n)*vy[m3+ynumy]+e*n*vy[m3+ynumy+1]) *xrat;
*MVY+=((1.0-e)*(1.0-n)*mvy[m3]+(1.0-e)*n*mvy[m3+1]+e*(1.0-n)*mvy[m3+ynumy]+e*n*mvy[m3+ynumy+1]) *xrat;
/* End Vy interpolation ------------------------ */
}
/* Calculation of SIGij by Interpolation */
/* Calculation of Vx,Vy, EPSxx*SIGxx,EPSyy*SIGyy,EPSxy*SIGxy by Interpolation */
// Not adapted for parallelization
void allinters(double x, double y)
/* x,y - XY location of point for Vx,Vy calc */
/* Interpolates Vx, Vy and the dissipation terms SIGxy*EPSxy and
   SIGxx*EPSxx to (x,y) using Fornberg interpolation weights over an
   (1+intermod)-wide node stencil. Results are returned through the global
   eps[] buffer: eps[11]=Vx, eps[12]=Vy, eps[13]=SIGxy^2/(2*nu),
   eps[14]=SIGxx^2/(2*nd). Also stores the upper-left node indices in
   wn[0],wn[1]. Not thread-safe: uses shared globals (eps, wn, cn via
   nodewt) — see "Not adapted for parallelization" note above. */
{
/* Counters */
long int m1,m2,m3,m10,m20,m1min,m1max,m2min,m2max;
/* en-NormalisedDistance */
double ival;
/**/
/**/
/* Clamp the point into the model domain */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Up Left Node X,Y Num */
wn[0]=m10=m1serch(x);
wn[1]=m20=m2serch(y);
/**/
/**/
/**/
/* SIGxy*EPSxy interpolation (basic nodes) ------------------------ */
/* Buffer clear */
eps[13]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(m1min<1) m1min=1; if(m1min>xnumx-3) m1min=xnumx-3;
m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2;
m1min=m1min-intermod; if(m1min<1) m1min=1;
/**/
m2min=m20; if(m2min<1) m2min=1; if(m2min>ynumy-3) m2min=ynumy-3;
m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2;
m2min=m2min-intermod; if(m2min<1) m2min=1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,0,0);
/**/
/* SIGxy,EPSxy Interpolate after interpolation weights */
/* ival = product of horizontal (cn[][1]) and vertical (cn[][0]) weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[13]+=ival*sxy[m3]*sxy[m3]/(2.0*nu[m3]);
}
/* End SIGxy*EPSxy interpolation ------------------------ */
/**/
/**/
/**/
/* SIGxx*EPSxx, SIGyy*EPSyy interpolation (cell-center nodes) --------- */
/* Buffer clear */
eps[14]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(x>(gx[m10]+gx[m10+1])/2.0) m1min+=1;
if(m1min<1) m1min=1; if(m1min>xnumx-2) m1min=xnumx-2;
m1max=m1min+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1;
m1min=m1min-intermod; if(m1min<1) m1min=1;
/**/
m2min=m20; if(y>(gy[m20]+gy[m20+1])/2.0) m2min+=1;
if(m2min<1) m2min=1; if(m2min>ynumy-2) m2min=ynumy-2;
m2max=m2min+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1;
m2min=m2min-intermod; if(m2min<1) m2min=1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,-1,-1);
/**/
/* SIGxx,EPSxx,SIGyy,EPSyy,P Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[14]+=ival*sxx[m3]*sxx[m3]/(2.0*nd[m3]);
}
/* End SIGxx*EPSxx,SIGyy*EPSyy interpolation ------------------------ */
/**/
/**/
/**/
/* Vx interpolation (Vx nodes, forward-shifted in Y) ------------------ */
/* Buffer clear */
eps[11]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10-intermod; if(m1min<0) m1min=0;
m1max=m10+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1;
/**/
m2min=m20; if(y<(gy[m20]+gy[m20+1])/2.0) m2min-=1;
if(m2min<0) m2min=0; if(m2min>ynumy-3) m2min=ynumy-3;
m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2;
m2min=m2min-intermod; if(m2min<0) m2min=0;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,0,+1);
/**/
/* Vx Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[11]+=ival*vx[m3];
}
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation (Vy nodes, forward-shifted in X) ------------------ */
/* Buffer clear */
eps[12]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(x<(gx[m10]+gx[m10+1])/2.0) m1min-=1;
if(m1min<0) m1min=0; if(m1min>xnumx-3) m1min=xnumx-3;
m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2;
m1min=m1min-intermod; if(m1min<0) m1min=0;
/**/
m2min=m20-intermod; if(m2min<0) m2min=0;
m2max=m20+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,+1,0);
/**/
/* Vy Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[12]+=ival*vy[m3];
}
/* End Vy interpolation ------------------------ */
}
/* Calculation of Vx,Vy, EPSxx*SIGxx,EPSyy*SIGyy,EPSxy*SIGxy by Interpolation */
/* Calculation of Vx,Vy, EPSxx,EPSyy,EPSxy, SPINxy by Interpolation */
// Not adapted for parallelization
void allinteri(double x, double y)
/* x,y - XY location of point for Vx,Vy calc */
/* Interpolates strain rates, spin, pressure and velocity to (x,y) using
   Fornberg interpolation weights over an (1+intermod)-wide stencil.
   Results go into the global eps[] buffer:
   eps[4]=EPSxy, eps[30]=SPINxy, eps[6]=EPSxx, eps[10]=P,
   eps[11]=Vx, eps[12]=Vy. Velocities are accumulated in two passes:
   a (1-xrat) contribution from cell-center-adjacent staggered nodes plus
   an xrat contribution from the proper Vx/Vy nodes. Also stores the
   upper-left node indices in wn[0],wn[1]. Not thread-safe: uses shared
   globals (eps, wn, cn via nodewt). */
{
/* Counters */
long int m1,m2,m3,m10,m20,m1min,m1max,m2min,m2max;
/* en-NormalisedDistance */
double ival,xrat;
/**/
/**/
/* Clamp the point into the model domain */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Weighting between near (staggered) and far nodes for velocity;
   pure near-node interpolation (xrat=1) at the domain edges */
xrat=2.0/3.0;
if(x<(gx[0]+gx[1])/2.0) xrat=1.0;
if(x>(gx[xnumx-2]+gx[xnumx-1])/2.0) xrat=1.0;
if(y<(gy[0]+gy[1])/2.0) xrat=1.0;
if(y>(gy[ynumy-2]+gy[ynumy-1])/2.0) xrat=1.0;
/**/
/**/
/**/
/* Up Left Node X,Y Num */
wn[0]=m10=m1serch(x);
wn[1]=m20=m2serch(y);
/**/
/**/
/**/
/* EPSxy, SPINxy interpolation (basic nodes) ------------------------ */
/* Buffer clear */
eps[4]=eps[30]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(m1min<1) m1min=1; if(m1min>xnumx-3) m1min=xnumx-3;
m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2;
m1min=m1min-intermod; if(m1min<1) m1min=1;
/**/
m2min=m20; if(m2min<1) m2min=1; if(m2min>ynumy-3) m2min=ynumy-3;
m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2;
m2min=m2min-intermod; if(m2min<1) m2min=1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,0,0);
/**/
/* SIGxy,EPSxy Interpolate after interpolation weights */
/* ival = product of horizontal (cn[][1]) and vertical (cn[][0]) weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[4]+=ival*exy[m3];
eps[30]+=ival*esp[m3];
}
/* End EPSxy, SPINxy interpolation ------------------------ */
/**/
/**/
/**/
/* EPSxx, P interpolation (cell-center nodes) ------------------------ */
/* Buffer clear (velocity buffers also start accumulating here) */
eps[6]=eps[10]=eps[11]=eps[12]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(x>(gx[m10]+gx[m10+1])/2.0) m1min+=1;
if(m1min<1) m1min=1; if(m1min>xnumx-2) m1min=xnumx-2;
m1max=m1min+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1;
m1min=m1min-intermod; if(m1min<1) m1min=1;
/**/
m2min=m20; if(y>(gy[m20]+gy[m20+1])/2.0) m2min+=1;
if(m2min<1) m2min=1; if(m2min>ynumy-2) m2min=ynumy-2;
m2max=m2min+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1;
m2min=m2min-intermod; if(m2min<1) m2min=1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,-1,-1);
/**/
/* SIGxx,EPSxx,SIGyy,EPSyy,P Interpolate after interpolation weights */
/* Velocity gets the (1-xrat) far-node contribution from the averaged
   staggered velocities adjacent to each cell-center node */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[6]+=ival*exx[m3];
eps[10]+=ival*pr[m3];
eps[11]+=ival*(vx[m3-1]+vx[m3-ynumy-1])*0.5*(1.0-xrat);
eps[12]+=ival*(vy[m3-ynumy]+vy[m3-ynumy-1])*0.5*(1.0-xrat);
}
/* End EPSxx, P interpolation ------------------------ */
/*
depthp(x,y);eps[10]=eps[50];
*/
/**/
/**/
/**/
/* Vx interpolation (Vx nodes, forward-shifted in Y) ------------------ */
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10-intermod; if(m1min<0) m1min=0;
m1max=m10+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1;
/**/
m2min=m20; if(y<(gy[m20]+gy[m20+1])/2.0) m2min-=1;
if(m2min<0) m2min=0; if(m2min>ynumy-3) m2min=ynumy-3;
m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2;
m2min=m2min-intermod; if(m2min<0) m2min=0;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,0,+1);
/**/
/* Vx Interpolate after interpolation weights */
/* xrat near-node contribution accumulated onto eps[11] */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[11]+=ival*vx[m3]*xrat;
}
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation (Vy nodes, forward-shifted in X) ------------------ */
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(x<(gx[m10]+gx[m10+1])/2.0) m1min-=1;
if(m1min<0) m1min=0; if(m1min>xnumx-3) m1min=xnumx-3;
m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2;
m1min=m1min-intermod; if(m1min<0) m1min=0;
/**/
m2min=m20-intermod; if(m2min<0) m2min=0;
m2max=m20+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,+1,0);
/**/
/* Vy Interpolate after interpolation weights */
/* xrat near-node contribution accumulated onto eps[12] */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[12]+=ival*vy[m3]*xrat;
}
/* End Vy interpolation ------------------------ */
}
/* Calculation of Vx,Vy, EPSxx,EPSyy,EPSxy, SPINxy by Interpolation */
|
icv-threads-openmp3x.c | /*******************************************************************
OpenMP-3.0 Example Codes Beta-v1.0
File : icv-threads-openmp3x.c
Description : Simple example program to demonstrate the use of
OpenMP Library calls to change the default values
of the internal control variables.
- omp_set_nested() change ICV nest-vars : which enable
or disable the nested parallelism
- omp_set_max_active_levels() change ICV max_active-levels-var
:which limits the number of nested active parallel regions
- omp_set_dynamic() change ICV dyn-vars : which enable or disable
the dynamic adjustment of the number of threads available for the
execution of subsequent parallel region.
- omp_set_num_threads() change ICV nthread-vars : which sets the
number of threads for the next parallel region.
OpenMP Pragma /
Function Used :
- omp_set_nested()
- omp_set_max_active_levels()
- omp_set_dynamic()
- omp_set_num_threads()
- omp_get_max_active_levels(),
- omp_get_num_threads(),
- omp_get_max_threads()
Input : None
Output : Values of Internal Control Variables
**********************************************************************/
/* Header file inclusion */
#include <stdio.h>
#include <omp.h>
/* main function */
int main (void)
{
/* Change the default values of the OpenMP internal control variables
   (ICVs) before any parallel region is created. */
omp_set_nested(1); /* Enable nested parallelism (nest-var) */
omp_set_max_active_levels(8); /* Allow up to 8 nested active levels (max-active-levels-var) */
omp_set_dynamic(0); /* Disable dynamic adjustment of team sizes (dyn-var) */
omp_set_num_threads(2); /* Next parallel region uses 2 threads (nthreads-var) */
/* Outer : create the parallel region (team of 2 threads) */
#pragma omp parallel
{
/* Sets the per-task nthreads-var for regions started AFTER this call;
   it does not resize the currently executing team. */
omp_set_num_threads(3);
/* Inner: create the parallel region inside the outer parallel region
   (a team of 3 threads per outer thread, since nesting is enabled) */
#pragma omp parallel
{
/* Only affects a parallel region started after this point. */
omp_set_num_threads(4);
#pragma omp single /* Restricting the one thread (per inner team) to do the work */
{
/*
 * The following should print (once per outer thread, i.e. twice):
 * Inner: max_act_lev=8, num_thds=3, max_thds=4
 * Inner: max_act_lev=8, num_thds=3, max_thds=4
 */
printf ("\n\t\t Inner: max_act_lev=%d, num_thds=%d,max_thds=%d\n",omp_get_max_active_levels(), omp_get_num_threads(),omp_get_max_threads());
}
} /* End of inner parallel region */
#pragma omp barrier /* Synchronization point for the outer team */
#pragma omp single /* Outer: Restricting the one thread to do the work */
{
/*
 * The following should print:
 * Outer: max_act_lev=8, num_thds=2, max_thds=3
 */
printf ("\n\t\t Outer: max_act_lev=%d, num_thds=%d,max_thds=%d\n",omp_get_max_active_levels(), omp_get_num_threads(),omp_get_max_threads());
}
} /* End of outer parallel region */
} /* End of main function */
|
GB_unop__identity_int16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int16_uint16
// op(A') function: GB_unop_tran__identity_int16_uint16
// C type: int16_t
// A type: uint16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
// GB_ATYPE: the type of the input matrix A
#define GB_ATYPE \
uint16_t
// GB_CTYPE: the type of the output matrix C
#define GB_CTYPE \
int16_t
// GB_GETA: aij = Ax [pA], load one entry of A
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// GB_CX: access one entry of C
#define GB_CX(p) Cx [p]
// unary operator (identity: z = x)
#define GB_OP(z, x) \
z = x ;
// casting: uint16_t input to int16_t output
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij): the full per-entry kernel (load, cast, store)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// 0 here: the op is identity but it typecasts (uint16_t -> int16_t),
// so a raw bulk memcpy of Ax into Cx cannot be used
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this kernel and use the generic case if these compile-time
// controls are set
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [0..anz-1] = (int16_t) Ax [0..anz-1], in parallel.  In the bitmap
// case (Ab != NULL) entries with Ab [p] == 0 are skipped.
GrB_Info GB_unop_apply__identity_int16_uint16
(
    int16_t *Cx,                  // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap, NULL otherwise
    int64_t anz,                  // number of entries to process
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = (int16_t) Ax [p] ;
            }
        }
    }
    else
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op without typecast: one bulk copy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        // dense/sparse case: cast every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (int16_t) Ax [p] ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_int16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // presumably one workspace per thread — see GB_unop_transpose.c
    const int64_t *GB_RESTRICT A_slice, // presumably how A is partitioned across threads — confirm in template
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template; the GB_* macros defined above
    // (GB_CAST_OP, GB_ATYPE, ...) are expanded inside it
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' value *y from *x, storing the
 * difference in *result.
 *
 * NOTE: *y may be modified (its usec field is normalized so the
 * subtraction never borrows) — this matches the classic glibc
 * example's semantics; callers here discard y afterwards.
 *
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that 0 <= x->tv_usec - y->tv_usec < 1000000. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now non-negative by construction. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff x's seconds fell below y's after normalization. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: allocates the double-buffered 3D grid A[2] and the
 * 13 axis-symmetric coefficient arrays, runs the time-tiled 25-point
 * stencil TESTS times, and reports per-test and best wall-clock times.
 *
 * Usage: prog Nx Ny Nz Nt — all four arguments are required; the
 * spatial sizes are padded by 8 (4 ghost cells per side).
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* BUG FIX: the original read Nx..Nt only when enough arguments were
     given, but used them unconditionally below (uninitialized reads
     otherwise).  Require all four arguments up front. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+8;
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);
  /* allocate the two time-levels of the grid */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* 13 coefficient arrays: center plus 4 shells on each of 3 axes */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 32;
  tile_size[4] = -1;
  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /* Initialize the data.  BUG FIX: the stencil reads the full
     [0, N-1] index range (it writes [4, N-5] with +/-4 halos), and the
     second time-level's halo is never written, so BOTH buffers must be
     fully initialized.  The original started at index 1 and only
     filled A[0], leaving uninitialized reads at the boundaries. */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    /* serial execution - Addition: 6 && Multiplication: 2 */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    int lbv, ubv;
    /* Start of CLooG code (auto-generated tiled loop nest; do not
       hand-modify the bounds) */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,3);t1++) {
        lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
        ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(12*t1+Ny+15,32)),floord(24*t2+Ny+11,32)),floord(24*t1-24*t2+Nz+Ny+13,32));t3++) {
            for (t4=max(max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32)),ceild(32*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(12*t1+Nx+15,32)),floord(24*t2+Nx+11,32)),floord(32*t3+Nx+19,32)),floord(24*t1-24*t2+Nz+Nx+13,32));t4++) {
              for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),8*t3+6),8*t4+6);t5++) {
                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
                    lbv=max(32*t4,4*t5+4);
                    ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)])))
                        + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Free allocated arrays */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A); /* BUG FIX: outer spine was leaked */
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef); /* BUG FIX: outer spine was leaked */
  free(tile_size); /* BUG FIX: tile size list was leaked */
  return 0;
}
|
GB_unop__identity_int16_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_bool)
// op(A') function: GB (_unop_tran__identity_int16_bool)
// C type: int16_t
// A type: bool
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
// GB_ATYPE: the type of the input matrix A
#define GB_ATYPE \
bool
// GB_CTYPE: the type of the output matrix C
#define GB_CTYPE \
int16_t
// GB_GETA: aij = Ax [pA], load one entry of A
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// GB_CX: access one entry of C
#define GB_CX(p) Cx [p]
// unary operator (identity: z = x)
#define GB_OP(z, x) \
z = x ;
// casting: bool input to int16_t output
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij): the full per-entry kernel (load, cast, store)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// 0 here: the op is identity but it typecasts (bool -> int16_t),
// so a raw bulk memcpy of Ax into Cx cannot be used
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this kernel and use the generic case if these compile-time
// controls are set
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [0..anz-1] = (int16_t) Ax [0..anz-1], in parallel.  In the bitmap
// case (Ab != NULL) entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__identity_int16_bool)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = (int16_t) Ax [p] ;
            }
        }
    }
    else
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op without typecast: one bulk copy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        // dense/sparse case: cast every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (int16_t) Ax [p] ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int16_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // presumably one workspace per thread — see GB_unop_transpose.c
    const int64_t *restrict A_slice, // presumably how A is partitioned across threads — confirm in template
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template; the GB_* macros defined above
    // (GB_CAST_OP, GB_ATYPE, ...) are expanded inside it
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
}
#endif
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image.  Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree.  Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax.  If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typdef declarations.
*/
/*
  A node in the color description tree: each node represents a cube in
  RGB(A) space.  See the file header for the meaning of the classification
  statistics (n1/n2, Sr/Sg/Sb, E).
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,          /* enclosing cube one level up */
    *child[16];       /* sub-cubes: 3 color bits + 1 alpha bit (see ColorToNodeId) */

  MagickSizeType
    number_unique;    /* pixels classified exactly at this node (n2) */

  DoublePixelPacket
    total_color;      /* per-channel sums for those pixels (Sr, Sg, Sb) */

  MagickRealType
    quantize_error;   /* accumulated quantization error for this node (E) */

  size_t
    color_number,     /* presumably the colormap index assigned during
                         DefineImageColormap — confirm in that function */
    id,               /* child slot (0..15) this node occupies in its parent */
    level;            /* depth of this node in the tree */
} NodeInfo;

/*
  NodeInfo records are carved out of arrays; this singly linked list
  tracks the arrays so they can be released in bulk (DestroyCubeInfo).
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;           /* one batch of nodes (presumably NodesInAList entries) */

  struct _Nodes
    *next;            /* previously allocated batch, or NULL */
} Nodes;

/*
  Working state for one quantization run, shared by the classification,
  reduction, and assignment phases.  NOTE(review): field notes below are
  inferred from the declarations and the file-header description; confirm
  against the phase implementations.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;            /* root of the color description tree */

  size_t
    colors,           /* number of colors currently represented */
    maximum_colors;   /* requested ceiling on output colors */

  ssize_t
    transparent_index;   /* colormap slot for transparency; -1 when unset */

  MagickSizeType
    transparent_pixels;  /* count of fully transparent pixels seen */

  DoublePixelPacket
    target;           /* color currently being matched (ClosestColor) */

  MagickRealType
    distance,            /* best match distance found so far for `target` */
    pruning_threshold,   /* current Ep: nodes with E <= Ep are pruned */
    next_threshold;      /* smallest E above Ep; next iteration's Ep */

  size_t
    nodes,            /* total nodes allocated so far */
    free_nodes,       /* unused nodes remaining in the current batch */
    color_number;     /* scratch colormap index used during lookup */

  NodeInfo
    *next_node;       /* next unused node in the current batch */

  Nodes
    *node_queue;      /* list of node batches (see struct _Nodes) */

  MemoryInfo
    *memory_info;     /* backing allocation for `cache` */

  ssize_t
    *cache;           /* presumably memoized color -> colormap-index
                         lookups (CacheShift) — confirm in assignment */

  DoublePixelPacket
    error[ErrorQueueLength];   /* recent error terms (dithering) */

  MagickRealType
    weights[ErrorQueueLength]; /* weights applied to those error terms */

  QuantizeInfo
    *quantize_info;   /* options controlling this quantization run */

  MagickBooleanType
    associate_alpha;  /* premultiply color channels by alpha? */

  ssize_t
    x,
    y;                /* current pixel position (used while dithering) */

  size_t
    depth;            /* maximum tree depth for this run */

  MagickOffsetType
    offset;           /* progress tracking: pixels processed so far */

  MagickSizeType
    span;             /* progress tracking: total pixels to process */
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
/*
  Allocate and default-initialize a QuantizeInfo structure, seeding its
  dither and error-measurement settings from image_info when one is
  supplied.  Fatally throws on allocation failure.
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *info;

  info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(info);
  if (image_info == (ImageInfo *) NULL)
    return(info);
  /* Inherit dither settings and verbosity from the image options. */
  info->dither=image_info->dither;
  {
    const char
      *option;

    option=GetImageOption(image_info,"dither");
    if (option != (const char *) NULL)
      info->dither_method=(DitherMethod) ParseCommandOption(
        MagickDitherOptions,MagickFalse,option);
  }
  info->measure_error=image_info->verbose;
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  Load a pixel into a DoublePixelPacket.  When the cube associates alpha
  and the pixel is not fully opaque, the color channels are premultiplied
  by alpha; otherwise the channels are copied through unscaled.  The
  opacity channel is always copied verbatim.
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
  alpha_pixel->index=0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->opacity != OpaqueOpacity))
    {
      MagickRealType
        alpha;

      /* Premultiply: alpha in [0,1], derived from the opacity channel. */
      alpha=(MagickRealType) (QuantumScale*(QuantumRange-
        GetPixelOpacity(pixel)));
      alpha_pixel->red=alpha*GetPixelRed(pixel);
      alpha_pixel->green=alpha*GetPixelGreen(pixel);
      alpha_pixel->blue=alpha*GetPixelBlue(pixel);
      alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
      return;
    }
  alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
  alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
  alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}
/*
  Map a pixel color to a child index (0..15) at tree level `index`: bit
  `index` of each channel (scaled to 8 bits) contributes one bit of the
  id — red to bit 0, green to bit 1, blue to bit 2, and, when alpha is
  associated, opacity to bit 3.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) ((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >>
    index) & 0x01);
  id|=((size_t) ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >>
    index) & 0x01)) << 1;
  id|=((size_t) ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
    index) & 0x01)) << 2;
  if (cube_info->associate_alpha != MagickFalse)
    id|=((size_t) ((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >>
      index) & 0x01)) << 3;
  return(id);
}
/*
  Return MagickTrue when two pixels match in RGB and, for matte images,
  in opacity as well.
*/
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  if (GetPixelRed(p) != GetPixelRed(q))
    return(MagickFalse);
  if (GetPixelGreen(p) != GetPixelGreen(q))
    return(MagickFalse);
  if (GetPixelBlue(p) != GetPixelBlue(q))
    return(MagickFalse);
  /* Opacity only participates when the image carries a matte channel. */
  if ((image->matte != MagickFalse) &&
      (GetPixelOpacity(p) != GetPixelOpacity(q)))
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"
  ssize_t
    y;
  /*
    Allocate image colormap.
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace(image,sRGBColorspace);
  if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    DefineImageColormap() increments image->colors as it emits entries, so the
    count (and the transparency bookkeeping it also updates) is reset first.
  */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if ((cube_info->quantize_info->dither != MagickFalse) &&
      (cube_info->quantize_info->dither_method != NoDitherMethod))
    (void) DitherImage(image,cube_info);
  else
    {
      CacheView
        *image_view;
      ExceptionInfo
        *exception;
      MagickBooleanType
        status;
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      /*
        Rows are independent here, so each row is mapped to the colormap in
        parallel; `cube` is copied per row since ClosestColor() mutates it.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;
        register IndexPacket
          *magick_restrict indexes;
        register PixelPacket
          *magick_restrict q;
        register ssize_t
          x;
        ssize_t
          count;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        cube=(*cube_info);
        /*
          `count` is the length of the run of identical pixels starting at x;
          the colormap lookup is done once per run, not once per pixel.
        */
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;
          register const NodeInfo
            *node_info;
          register ssize_t
            i;
          size_t
            id,
            index;
          /*
            Identify the deepest node containing the pixel's color.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
            if (IsSameColor(image,q,q+count) == MagickFalse)
              break;
          AssociateAlphaPixel(&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Sentinel strictly larger than any possible squared distance. */
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
            (QuantumRange+1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          /*
            Stamp the whole run with the chosen colormap entry.
          */
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(indexes+x+i,index);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRgb(q,image->colormap+index);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelOpacity(q,image->colormap[index].opacity);
              }
            q++;
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AssignImageColors)
#endif
            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image);
  /*
    Two-color grayscale request: force the colormap to pure black and white,
    darker entry first by luma.
  */
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;
      /*
        Monochrome image.
      */
      intensity=0.0;
      if ((image->colors > 1) &&
          (GetPixelLuma(image,image->colormap+0) >
           GetPixelLuma(image,image->colormap+1)))
        intensity=(double) QuantumRange;
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image);
  /*
    Undo the working-colorspace transform applied on entry.
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each such
% node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  /*
    Track an alpha channel during quantization only when the image has a
    matte channel, and never for a two-color grayscale request.
  */
  cube_info->associate_alpha=image->matte;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    cube_info->associate_alpha=MagickFalse;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
  CacheView
    *image_view;
  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;
  MagickBooleanType
    proceed;
  MagickRealType
    bisect;
  NodeInfo
    *node_info;
  size_t
    count,
    id,
    index,
    level;
  ssize_t
    y;
  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  /*
    `midpoint` is the center of the full color cube; each level of descent
    halves `bisect` and nudges `mid` toward the child actually taken.
  */
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  midpoint.index=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    First pass: classify at full MaxTreeDepth until the color count exceeds
    cube_info->maximum_colors, then prune and fall through to the second
    (reduced-depth) pass below.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: continue from the row the first pass stopped at, but only
    descend to the (possibly pruned) cube_info->depth rather than
    MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                /* NOTE(review): format string here is "%s" while the first
                   pass uses "`%s'" — cosmetic inconsistency only. */
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance))
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
          pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  /* MagickFalse only if a row read failed or progress aborted the scan. */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate a defaulted QuantizeInfo; if a source is supplied, copy its
    user-visible settings over the defaults.
  */
  clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither=quantize_info->dither;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;
  size_t
    number_children;
  /*
    Traverse any children.
  */
  /* Depth-first over the 8 (RGB) or 16 (RGBA) child slots, then test this
     node's own colormap entry (if it represents any pixels) against the
     best match accumulated in cube_info->distance/color_number. */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;
      register DoublePixelPacket
        *magick_restrict q;
      register MagickRealType
        alpha,
        beta,
        distance;
      register PixelPacket
        *magick_restrict p;
      /*
        Determine if this color is "closest".
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          /* Weight each color by its pixel's normalized coverage. */
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      /*
        Squared distance accumulated channel by channel, bailing out as soon
        as the partial sum exceeds the best distance found so far.
      */
      pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      /* NOTE(review): the alpha term is not weighted by
                         alpha/beta like the color terms above — confirm
                         intended before changing. */
                      pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only palette images qualify; re-quantizing at full tree depth with the
    current number of colors folds duplicate and unused colormap entries.
  */
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);
  GetQuantizeInfo(&quantize_info);
  quantize_info.tree_depth=MaxTreeDepth;
  quantize_info.number_colors=image->colors;
  return(QuantizeImage(&quantize_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero. DefineImageColormap() returns the
% number of
% colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;
  size_t
    number_children;
  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;
      register PixelPacket
        *magick_restrict q;
      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      /* alpha = 1/number_unique, guarded against division by zero. */
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;
          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              double
                gamma;
              /*
                Colors were accumulated premultiplied (AssociateAlphaPixel),
                so divide the mean color by the mean coverage to recover the
                straight color.
              */
              gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
              gamma=PerceptibleReciprocal(gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.blue)));
              /* Remember the non-opaque entry covering the most pixels. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage.  A while-guard (instead of the previous
    do/while) avoids dereferencing a NULL node queue, so a CubeInfo whose
    queue was never populated can still be destroyed safely.
  */
  while (cube_info->node_queue != (Nodes *) NULL)
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  }
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Free a QuantizeInfo.  The signature is inverted before the free so any
    later use of a stale pointer trips the signature assert.  Returns the
    value of RelinquishMagickMemory() for the caller to reassign —
    presumably NULL; confirm against the memory module.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Release the per-thread error buffers and the pointer array itself;
    always returns NULL.  The thread-limit query is hoisted out of the loop
    (it is loop-invariant, matching how AcquirePixelThreadSet caches it).
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Allocate one scratch buffer per worker thread; each buffer holds two
    error rows (current and previous) of `count` pixels.  All slots are
    NULL-ed first so a partial failure can be unwound by
    DestroyPixelThreadSet().
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
    pixels[i]=(DoublePixelPacket *) NULL;
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,
      2*sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
  ssize_t
    offset;
  /*
    Reduce each channel to (8-CacheShift) significant bits and pack the
    reduced channels into a single index into the closest-color cache, so
    similar colors reuse one cached colormap lookup.
  */
  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"
  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using serpentine-scan Floyd-Steinberg error
    diffusion.  Fix: the cache-view sync and the progress-monitor callback
    were previously issued inside the per-pixel loop, flushing the view and
    reporting progress once per PIXEL; both now run once per row, matching
    the pattern used by AssignImageColors().
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  exception=(&image->exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    cube=(*cube_info);
    /*
      Two error rows per thread buffer; their roles alternate with row
      parity.  `v` is the scan direction: odd rows run right-to-left.
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(&cube,q+u,&pixel);
      /*
        Gather diffused error: 7/16 from the previous pixel on this row,
        1/16, 5/16 and 3/16 from the three neighbors on the previous row.
      */
      if (x > 0)
        {
          pixel.red+=7*current[u-v].red/16;
          pixel.green+=7*current[u-v].green/16;
          pixel.blue+=7*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=7*current[u-v].opacity/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=previous[u+v].opacity/16;
            }
          pixel.red+=5*previous[u].red/16;
          pixel.green+=5*previous[u].green/16;
          pixel.blue+=5*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=5*previous[u].opacity/16;
          if (x > 0)
            {
              pixel.red+=3*previous[u-v].red/16;
              pixel.green+=3*previous[u-v].green/16;
              pixel.blue+=3*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=3*previous[u-v].opacity/16;
            }
        }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
            1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(indexes+u,index);
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q+u,image->colormap+index);
          if (cube.associate_alpha != MagickFalse)
            SetPixelOpacity(q+u,image->colormap[index].opacity);
        }
      /*
        Store the error.
      */
      AssociateAlphaPixel(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].opacity=pixel.opacity-color.opacity;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  return(MagickTrue);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);
/*
  Riemersma() recursively walks a Hilbert curve of order `level' across the
  image, dithering one pixel per curve step via RiemersmaDither().  The
  gravity constants encode the orientation of the current "U"-shaped curve
  segment; each case emits the moves (and, above level 1, the four rotated
  sub-curves) that make up that segment.  The exact call order defines the
  curve -- do not reorder these calls.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction)
{
  if (level == 1)
    /*
      Base case: a single first-order "U", three unit moves.
    */
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        break;
      }
      default:
        break;
    }
  else
    /*
      Recursive case: four sub-curves of order level-1 (the first and last
      rotated), stitched together by three unit moves.
    */
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        break;
      }
      default:
        break;
    }
}
/*
  RiemersmaDither() dithers the single pixel at the cube's current (x,y)
  position: it adds the weighted error queue to the pixel, snaps the result
  to the closest colormap entry (via the color-cube cache), writes the
  assignment back, pushes the new quantization error onto the queue, and
  finally advances (x,y) one step in `direction'.  Returns MagickFalse to
  abort the curve walk (pixel access failure or progress cancel).
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  /*
    Only dither when the walker is inside the image; the Hilbert curve
    covers a power-of-two square that can overhang the image edges.
  */
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      ExceptionInfo
        *exception;

      register IndexPacket
        *magick_restrict indexes;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error.
      */
      exception=(&image->exception);
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickFalse);
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      AssociateAlphaPixel(cube_info,q,&pixel);
      /*
        Add the exponentially-decaying weighted sum of past errors.
      */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.opacity+=p->weights[i]*p->error[i].opacity;
      }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      /*
        Look up (or compute and cache) the closest colormap entry for this
        quantized color bucket.
      */
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          /* Seed distance just above the maximum possible squared RGBA
             distance so any real candidate beats it. */
          p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      /* NOTE(review): the `1*` multiplier is a no-op; index is simply the
         cached colormap position. */
      index=(size_t) (1*p->cache[i]);
      if (image->storage_class == PseudoClass)
        *indexes=(IndexPacket) index;
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q,image->colormap+index);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelOpacity(q,image->colormap[index].opacity);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixel(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /*
    Step the walker one pixel in the requested direction (even when the
    current position was outside the image).
  */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
/*
  DitherImage() dispatches to the configured dither method: Floyd-Steinberg
  for anything other than RiemersmaDitherMethod, otherwise it distributes
  quantization error along a Hilbert curve large enough to cover the image.
*/
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    Compute the curve order: smallest depth with 2^depth >= max(width,height),
    so the curve's power-of-two square covers the whole image.
  */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
  /* Final step dithers the last pixel the curve walker is resting on. */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose a optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  MagickRealType
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
  /* Clamp requested tree depth to the supported range [2, MaxTreeDepth]. */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    /* NOTE(review): cube_info is leaked on this error path -- confirm
       whether callers expect a DestroyCubeInfo()-style cleanup here. */
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither == MagickFalse)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  /* Cache has one slot per quantized RGBA bucket (4 channels, each reduced
     to 8-CacheShift bits). */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    /* NOTE(review): cube_info and its tree are leaked here as well. */
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache.
  */
  /* Every byte 0xff makes each ssize_t slot -1, i.e. "not yet computed". */
  (void) ResetMagickMemory(cube_info->cache,(-1),sizeof(*cube_info->cache)*
    length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  /* Fold any rounding slack into the oldest weight so the sum is exactly 1. */
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
/*
  GetNodeInfo() hands out the next node from the cube's node arena,
  zero-initialized and linked to `parent'.  Nodes are allocated in batches of
  NodesInAList and tracked on cube_info->node_queue so they can be released
  in bulk.  Returns NULL on allocation failure.
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Release the queue header too; it is not yet linked into
            node_queue and would otherwise be leaked.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
/*
  GetImageQuantizeError() measures the per-channel difference between each
  pixel and its assigned colormap entry, filling image->error with the mean
  error per pixel, normalized mean square error and normalized maximum
  square error.  DirectClass images have no colormap, so the error is zero.
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  const IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    gamma,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* Three channels (R,G,B) contribute per pixel. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /*
      This is a virtual cache view; the virtual index queue matches the
      pixels returned by GetCacheViewVirtualPixels() above (the authentic
      queue would reference whatever authentic region was last accessed).
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          /* Weight each side by its (normalized) opacity coverage. */
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs((double) (alpha*GetPixelRed(p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
/*
  GetQuantizeInfo() resets *quantize_info to the library defaults: 256
  colors, Riemersma dithering enabled, undefined colorspace, no error
  measurement.  The signature field marks the structure as initialized.
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /* Validate the argument before doing anything else, matching the
     assert-first convention of the other public entry points. */
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
/*
  PosterizeImage() posterizes every default channel of the image; it is a
  convenience wrapper around PosterizeImageChannel().
*/
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  return(PosterizeImageChannel(image,DefaultChannels,levels,dither));
}
/*
  PosterizeImageChannel() snaps the selected channels of every pixel (and,
  for PseudoClass images, every colormap entry) to `levels' evenly spaced
  values, then re-quantizes the image to at most levels^3 colors.
*/
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
  const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag "Posterize/Image"
/* Map a quantum to the nearest of `levels' evenly spaced quanta; the
   MagickMax guard avoids division by zero when levels <= 1. */
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* NOTE(review): progress/status are listed as shared here but are not
     read or written by this colormap loop -- confirm intended. */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
  magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=PosterizePixel(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=PosterizePixel(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
  magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* A failure in any thread makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Re-quantize: each channel now has at most `levels' values, so levels^3
    colors (capped at the colormap limit) suffice.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneChild() removes node_info (and, recursively, its whole subtree) from
  the color cube, folding each pruned node's color statistics into its
  parent.
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  NodeInfo
    *parent;

  /*
    Prune the subtree bottom-up: children first.
  */
  number_children=16UL;
  if (cube_info->associate_alpha == MagickFalse)
    number_children=8UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[id]);
  /*
    Merge this node's statistics into the parent and detach it.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.opacity+=node_info->total_color.opacity;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneLevel() walks the tree depth-first and prunes every node sitting at
  the cube's maximum depth, merging its statistics into its parent.
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Visit all children before deciding about this node.
  */
  number_children=16UL;
  if (cube_info->associate_alpha == MagickFalse)
    number_children=8UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[id]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneToCubeDepth() prunes every node deeper than cube_info->depth,
  merging the pruned statistics into the surviving ancestors.
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Visit all children before deciding about this node.
  */
  number_children=16UL;
  if (cube_info->associate_alpha == MagickFalse)
    number_children=8UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[id]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Clamp the requested color count to (0, MaxColormapSize]. */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /* Grayscale images without alpha take a dedicated fast path. */
  if (image->matte == MagickFalse)
    {
      if (SetImageGray(image,&image->exception) != MagickFalse)
        (void) SetGrayscaleImage(image);
    }
  /* Already palettized within budget: nothing to quantize, just honor any
     requested colorspace transform. */
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    {
      if ((quantize_info->colorspace != UndefinedColorspace) &&
          (quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace(image,quantize_info->colorspace);
      return(MagickTrue);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither != MagickFalse) && (depth > 2))
        depth--;
      if ((image->matte != MagickFalse) && (depth > 5))
        depth--;
      if (SetImageGray(image,&image->exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image if it contains more than the
        maximum, otherwise we can disable dithering to improve the performance.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      else
        cube_info->quantize_info->dither_method=NoDitherMethod;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images);
      return(status);
    }
  status=MagickFalse;
  /* Clamp the requested color count to (0, MaxColormapSize]. */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* NOTE(review): unlike QuantizeImage(), there is no (depth > 2)
         guard before this decrement -- confirm that is intended. */
      if (quantize_info->dither != MagickFalse)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(&images->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    First pass: classify the colors of every image in the list into one
    shared color cube (progress monitors are suspended per image).
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,&image->exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      /*
        Second pass: map every image onto the shared reduced colormap.
      */
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% MagickRealType *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
/*
  QuantizeErrorFlatten() copies the quantization error of node_info and its
  subtree into quantize_error[] starting at `offset', in pre-order.  Returns
  the number of entries written (0 when offset is already past the node
  count).
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,
  MagickRealType *quantize_error)
{
  register ssize_t
    id;

  size_t
    count,
    number_children;

  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  /*
    Record this node, then append each child's subtree after it.
  */
  quantize_error[offset]=node_info->quantize_error;
  count=1;
  number_children=16UL;
  if (cube_info->associate_alpha == MagickFalse)
    number_children=8UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      count+=QuantizeErrorFlatten(cube_info,node_info->child[id],offset+count,
        quantize_error);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Post-order walk: visit every populated child before deciding whether the
    current node falls below the pruning threshold.
  */
  number_children=cube_info->associate_alpha != MagickFalse ? 16UL : 8UL;
  for (i=0; i < (ssize_t) number_children; i++)
  {
    const NodeInfo
      *child = node_info->child[i];

    if (child != (NodeInfo *) NULL)
      Reduce(cube_info,child);
  }
  if (node_info->quantize_error > cube_info->pruning_threshold)
    {
      /*
        Node survives: count it if it uniquely defines a color and track the
        smallest surviving error as the next pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
  else
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int MagickRealTypeCompare(const void *error_p,const void *error_q)
{
  /*
    qsort() comparator for MagickRealType values; values within MagickEpsilon
    of one another compare equal.  The near-equality test is performed first
    so the ordering is symmetric: previously *p > *q was tested before the
    epsilon band, so a value exceeding its partner by less than MagickEpsilon
    compared as greater in one argument order but equal in the other -- an
    inconsistent ordering, which qsort() does not permit.
  */
  MagickRealType
    *p,
    *q;

  p=(MagickRealType *) error_p;
  q=(MagickRealType *) error_q;
  if (fabs((double) (*q-*p)) <= MagickEpsilon)
    return(0);
  if (*p > *q)
    return(1);
  return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  /*
    Repeatedly prune the color cube until at most maximum_colors nodes define
    a color (the Ep loop described in the header comment above).
  */
  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      MagickRealType
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors: flatten every
        node's quantization error into a sorted array and seed the first
        pruning threshold so a single Reduce() pass removes most excess nodes
        (all but roughly 110% of the requested color count).  Allocation
        failure is non-fatal; the loop below still converges, just slower.
      */
      quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (MagickRealType *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
            MagickRealTypeCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(MagickRealType *) RelinquishMagickMemory(
            quantize_error);
        }
    }
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    /*
      Prune every node whose error is at or below the threshold; Reduce()
      recounts cube_info->colors and records the smallest surviving error in
      next_threshold for the following iteration.
    */
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Classify the colors of the reference image into the cube, then map each
    pixel of the target image to its closest classified color.
  */
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Limit the colormap to the reference image's color count before
        assigning colors.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Assign each image in the sequence its closest color from the
        reference colormap.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  /*
    qsort() comparator: orders PixelPacket colormap entries by increasing
    intensity via subtraction of the two intensities.
    NOTE(review): assumes the intensity difference fits in an int without
    overflow -- confirm against PixelPacketIntensity()'s return range.
  */
  PixelPacket
    *color_1,
    *color_2;

  int
    intensity;

  color_1=(PixelPacket *) x;
  color_2=(PixelPacket *) y;
  intensity=PixelPacketIntensity(color_1)-(int) PixelPacketIntensity(color_2);
  return((int) intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image)
{
  /*
    Convert the image to a PseudoClass grayscale image: build a colormap of
    the unique gray levels, sort it by intensity, collapse duplicate entries,
    and rewrite every pixel's index to point at the compacted colormap.
  */
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  PixelPacket
    *colormap;

  register ssize_t
    i;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace);
  /*
    colormap_index maps a ScaleQuantumToMap() intensity to its colormap slot
    (-1 == unassigned).  NOTE(review): entries are indexed by the scaled
    intensity, so this assumes MaxColormapSize exceeds the maximum value
    ScaleQuantumToMap() can produce -- confirm for all quantum depths.
  */
  colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass source: discover the unique gray levels and populate the
        colormap on the fly.  (This inner declaration shadows the outer
        `exception` variable.)
      */
      ExceptionInfo
        *exception;

      /*
        Filling with byte 0xff yields -1 in every ssize_t slot.
      */
      (void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize*
        sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          /*
            Red carries the gray level after the grayscale transform above.
          */
          intensity=ScaleQuantumToMap(GetPixelRed(q));
          if (colormap_index[intensity] < 0)
            {
              /*
                NOTE(review): double-checked update -- the first test reads
                colormap_index outside the critical section while other
                threads may be writing it; confirm this race is benign on
                the supported platforms.
              */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=GetPixelRed(q);
                  image->colormap[image->colors].green=GetPixelGreen(q);
                  image->colormap[image->colors].blue=GetPixelBlue(q);
                  image->colors++;
                }
            }
          SetPixelIndex(indexes+x,colormap_index[intensity]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity, temporarily storing each entry's original
    index in its opacity field so pixel indexes can be remapped afterwards.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].opacity=(unsigned short) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
    IntensityCompare);
  colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
    sizeof(*colormap));
  if (colormap == (PixelPacket *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Collapse runs of identical colors into a compacted colormap; the
    old-index -> new-index mapping is rebuilt in colormap_index.
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].opacity]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Rewrite every pixel index through the compaction mapping.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
      SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
        indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,&image->exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
|
AlloyArray.h | /*
* Copyright(C) 2015, Blake C. Lucas, Ph.D. (img.science@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef INCLUDE_ALLOYARRAY_H_
#define INCLUDE_ALLOYARRAY_H_
#include "AlloyCommon.h"
#include "cereal/cereal.hpp"
#include "cereal/types/array.hpp"
#include "cereal/types/string.hpp"
namespace aly {
// Fixed-size numeric array with cereal serialization and basic statistics.
template<class T, int C> struct Array: public std::array<T, C> {
    template<class Archive>
    void save(Archive & archive) const {
        archive(cereal::make_nvp(MakeString() << "array" << C, *this));
    }
    template<class Archive>
    void load(Archive & archive) {
        archive(cereal::make_nvp(MakeString() << "array" << C, *this));
    }
    // Assign val to every element.  (Bug fix: the loop variable was declared
    // float& instead of T&, which failed to compile for any element type
    // other than float.)
    void set(const T& val) {
        for (T& v : *this) {
            v = val;
        }
    }
    Array() :
            std::array<T, C>() {
    }
    // Fill-construct: every element is initialized to val.
    Array(const T& val) {
        set(val);
    }
    // Largest element.  (Bug fix: the accumulator was seeded with
    // numeric_limits<T>::min(), which for floating-point T is the smallest
    // positive value, so an all-negative array returned a bogus result; it
    // is now seeded with numeric_limits<T>::lowest().)
    T max() const {
        T tmp(std::numeric_limits<T>::lowest());
        for (int i = 0; i < C; i++) {
            if ((*this)[i] > tmp)
                tmp = (*this)[i];
        }
        return tmp;
    }
    // Smallest element.
    T min() const {
        T tmp(std::numeric_limits<T>::max());
        for (int i = 0; i < C; i++) {
            if ((*this)[i] < tmp)
                tmp = (*this)[i];
        }
        return tmp;
    }
    // Arithmetic mean (integer division for integral T).
    T mean() const {
        T tmp(0);
        for (int i = 0; i < C; i++) {
            tmp += (*this)[i];
        }
        return tmp / T(C);
    }
    // Median; averages the two middle elements (in double) when C is even.
    T median() const {
        std::vector<T> tmp(this->begin(), this->end());
        std::sort(tmp.begin(), tmp.end());
        if (C % 2 == 0) {
            return T(((double) tmp[C / 2] + (double) tmp[C / 2 - 1]) * 0.5f);
        } else {
            return tmp[C / 2];
        }
    }
    // Sample standard deviation (N-1 denominator); T(0) when C < 2.
    // NOTE(review): for unsigned T, (val - avg) can wrap before the cast to
    // double -- confirm callers only use signed/floating element types.
    T stdDev() const {
        if (C < 2) {
            return T(0);
        }
        T avg = mean();
        double var(0.0);
        for (const T& val : *this) {
            double e = (double) (val - avg);
            var += e * e;
        }
        var = var / (double) (C - 1);
        return T(std::sqrt(var));
    }
};
// Apply func(im1[i], im2[i]) element-wise, mutating either array; throws
// std::runtime_error on a size mismatch.
template<class T, int C> void Transform(Array<T, C>& im1, Array<T, C>& im2,
        const std::function<void(T&, T&)>& func) {
    if (im1.size() != im2.size())
        throw std::runtime_error(
                MakeString() << "Array dimensions do not match. " << im1.size()
                        << "!=" << im2.size());
    size_t sz = im1.size();
    // Signed loop index: unsigned induction variables are only valid for
    // "omp for" from OpenMP 3.0 on, and every sibling overload already uses
    // the int/cast form.
#pragma omp parallel for
    for (int offset = 0; offset < (int) sz; offset++) {
        func(im1[offset], im2[offset]);
    }
}
// Apply func to every element of im1 in place (parallelized with OpenMP).
template<class T, int C> void Transform(Array<T, C>& im1,
        const std::function<void(T&)>& func) {
    const size_t count = im1.size();
#pragma omp parallel for
    for (int idx = 0; idx < (int) count; idx++) {
        func(im1[idx]);
    }
}
// Apply func(im1[i], im2[i]) element-wise, mutating im1 only; throws
// std::runtime_error on a size mismatch.
template<class T, int C> void Transform(Array<T, C>& im1,
        const Array<T, C>& im2, const std::function<void(T&, const T&)>& func) {
    if (im1.size() != im2.size())
        throw std::runtime_error(
                MakeString() << "Array dimensions do not match. " << im1.size()
                        << "!=" << im2.size());
    const size_t count = im1.size();
#pragma omp parallel for
    for (int idx = 0; idx < (int) count; idx++) {
        func(im1[idx], im2[idx]);
    }
}
// Apply func(im1[i], im2[i], im3[i], im4[i]) element-wise, mutating im1
// only; throws std::runtime_error when im1 and im2 sizes disagree.
template<class T, int C> void Transform(Array<T, C>& im1,
        const Array<T, C>& im2, const Array<T, C>& im3, const Array<T, C>& im4,
        const std::function<void(T&, const T&, const T&, const T&)>& func) {
    if (im1.size() != im2.size())
        throw std::runtime_error(
                MakeString() << "Array dimensions do not match. " << im1.size()
                        << "!=" << im2.size());
    const size_t count = im1.size();
#pragma omp parallel for
    for (int idx = 0; idx < (int) count; idx++) {
        func(im1[idx], im2[idx], im3[idx], im4[idx]);
    }
}
// Apply func(im1[i], im2[i], im3[i]) element-wise, mutating im1 only;
// throws std::runtime_error when im1 and im2 sizes disagree.
template<class T, int C> void Transform(Array<T, C>& im1,
        const Array<T, C>& im2, const Array<T, C>& im3,
        const std::function<void(T&, const T&, const T&)>& func) {
    if (im1.size() != im2.size())
        throw std::runtime_error(
                MakeString() << "Array dimensions do not match. " << im1.size()
                        << "!=" << im2.size());
    const size_t count = im1.size();
#pragma omp parallel for
    for (int idx = 0; idx < (int) count; idx++) {
        func(im1[idx], im2[idx], im3[idx]);
    }
}
// Apply func(offset, im1[offset], im2[offset]) element-wise, passing the
// index to the functor; throws std::runtime_error on a size mismatch.
template<class T, int C> void Transform(Array<T, C>& im1, Array<T, C>& im2,
        const std::function<void(size_t offset, T& val1, T& val2)>& func) {
    if (im1.size() != im2.size())
        throw std::runtime_error(
                MakeString() << "Array dimensions do not match. " << im1.size()
                        << "!=" << im2.size());
    size_t sz = im1.size();
    // Signed loop index: unsigned induction variables are only valid for
    // "omp for" from OpenMP 3.0 on, and every sibling overload already uses
    // the int/cast form.
#pragma omp parallel for
    for (int offset = 0; offset < (int) sz; offset++) {
        func((size_t) offset, im1[offset], im2[offset]);
    }
}
// Stream an Array one "index: value" line at a time.
template<class T, class L, class R, int C> std::basic_ostream<L, R> & operator <<(
        std::basic_ostream<L, R> & ss, const Array<T, C> & A) {
    size_t idx = 0;
    for (const T& entry : A) {
        ss << std::setw(5) << idx << ": " << entry << std::endl;
        ++idx;
    }
    return ss;
}
// Element-wise scalar + array.
template<class T, int C> Array<T, C> operator+(const T& scalar,
        const Array<T, C>& img) {
    Array<T, C> out(img.size());
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst = scalar + src;
    };
    Transform(out, img, op);
    return out;
}
// out += scalar * in, element-wise.
// Fix: the original called out.resize(in.size()), but Array derives from
// std::array, which has no resize(); any instantiation failed to compile.
// Both arrays are statically sized C, so no size fixup is needed.
template<class T, int C> void ScaleAdd(Array<T, C>& out, const T& scalar,
        const Array<T, C>& in) {
    std::function<void(T&, const T&)> f =
            [=](T& val1, const T& val2) {val1 += scalar * val2;};
    Transform(out, in, f);
}
// out = in1 + scalar * in2, element-wise.
// Fix: removed out.resize(in1.size()) -- Array derives from std::array,
// which has no resize(), so any instantiation failed to compile; sizes are
// statically C.
template<class T, int C> void ScaleAdd(Array<T, C>& out, const Array<T, C>& in1,
        const T& scalar, const Array<T, C>& in2) {
    std::function<void(T&, const T&, const T&)> f =
            [=](T& val1, const T& val2, const T& val3) {val1 = val2+scalar * val3;};
    Transform(out, in1, in2, f);
}
// out = in1 + scalar2 * in2 + scalar3 * in3, element-wise.
// Fix: removed out.resize(in1.size()) -- Array derives from std::array,
// which has no resize(), so any instantiation failed to compile; sizes are
// statically C.
template<class T, int C> void ScaleAdd(Array<T, C>& out, const Array<T, C>& in1,
        const T& scalar2, const Array<T, C>& in2, const T& scalar3,
        const Array<T, C>& in3) {
    std::function<void(T&, const T&, const T&, const T&)> f = [=](T& out,
            const T& val1,
            const T& val2,
            const T& val3) {
        out = val1+scalar2*val2+scalar3 * val3;};
    Transform(out, in1, in2, in3, f);
}
// out -= scalar * in, element-wise.
// Fix: removed out.resize(in.size()) -- Array derives from std::array,
// which has no resize(), so any instantiation failed to compile; sizes are
// statically C.
template<class T, int C> void ScaleSubtract(Array<T, C>& out, const T& scalar,
        const Array<T, C>& in) {
    std::function<void(T&, const T&)> f =
            [=](T& val1, const T& val2) {val1 -= scalar * val2;};
    Transform(out, in, f);
}
// out = in1 - scalar * in2, element-wise.
// Fix: removed out.resize(in1.size()) -- Array derives from std::array,
// which has no resize(), so any instantiation failed to compile; sizes are
// statically C.
template<class T, int C> void ScaleSubtract(Array<T, C>& out,
        const Array<T, C>& in1, const T& scalar, const Array<T, C>& in2) {
    std::function<void(T&, const T&, const T&)> f =
            [=](T& val1, const T& val2, const T& val3) {val1 = val2 - scalar * val3;};
    Transform(out, in1, in2, f);
}
// out = v1 - v2, element-wise.
// Fix: removed out.resize(v1.size()) -- Array derives from std::array,
// which has no resize(), so any instantiation failed to compile; sizes are
// statically C.
template<class T, int C> void Subtract(Array<T, C>& out, const Array<T, C>& v1,
        const Array<T, C>& v2) {
    std::function<void(T&, const T&, const T&)> f =
            [=](T& val1, const T& val2, const T& val3) {val1 = val2-val3;};
    Transform(out, v1, v2, f);
}
// out = v1 + v2, element-wise.
// Fix: removed out.resize(v1.size()) -- Array derives from std::array,
// which has no resize(), so any instantiation failed to compile; sizes are
// statically C.
template<class T, int C> void Add(Array<T, C>& out, const Array<T, C>& v1,
        const Array<T, C>& v2) {
    std::function<void(T&, const T&, const T&)> f =
            [=](T& val1, const T& val2, const T& val3) {val1 = val2 + val3;};
    Transform(out, v1, v2, f);
}
// Element-wise scalar - array.
template<class T, int C> Array<T, C> operator-(const T& scalar,
        const Array<T, C>& img) {
    Array<T, C> out(img.size());
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst = scalar - src;
    };
    Transform(out, img, op);
    return out;
}
// Element-wise scalar * array.
template<class T, int C> Array<T, C> operator*(const T& scalar,
        const Array<T, C>& img) {
    Array<T, C> out(img.size());
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst = scalar * src;
    };
    Transform(out, img, op);
    return out;
}
// Element-wise scalar / array.
template<class T, int C> Array<T, C> operator/(const T& scalar,
        const Array<T, C>& img) {
    Array<T, C> out(img.size());
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst = scalar / src;
    };
    Transform(out, img, op);
    return out;
}
// Element-wise array + scalar.
template<class T, int C> Array<T, C> operator+(const Array<T, C>& img,
        const T& scalar) {
    Array<T, C> out(img.size());
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst = src + scalar;
    };
    Transform(out, img, op);
    return out;
}
// Element-wise array - scalar.
template<class T, int C> Array<T, C> operator-(const Array<T, C>& img,
        const T& scalar) {
    Array<T, C> out(img.size());
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst = src - scalar;
    };
    Transform(out, img, op);
    return out;
}
// Element-wise array * scalar.
template<class T, int C> Array<T, C> operator*(const Array<T, C>& img,
        const T& scalar) {
    Array<T, C> out(img.size());
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst = src * scalar;
    };
    Transform(out, img, op);
    return out;
}
// Element-wise array / scalar.
template<class T, int C> Array<T, C> operator/(const Array<T, C>& img,
        const T& scalar) {
    Array<T, C> out(img.size());
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst = src / scalar;
    };
    Transform(out, img, op);
    return out;
}
// Element-wise negation.
template<class T, int C> Array<T, C> operator-(const Array<T, C>& img) {
    Array<T, C> out(img.size());
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst = -src;
    };
    Transform(out, img, op);
    return out;
}
// In-place element-wise addition; returns a copy of the result.
template<class T, int C> Array<T, C> operator+=(Array<T, C>& out,
        const Array<T, C>& img) {
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst += src;
    };
    Transform(out, img, op);
    return out;
}
// In-place element-wise subtraction; returns a copy of the result.
template<class T, int C> Array<T, C> operator-=(Array<T, C>& out,
        const Array<T, C>& img) {
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst -= src;
    };
    Transform(out, img, op);
    return out;
}
// In-place element-wise multiplication; returns a copy of the result.
template<class T, int C> Array<T, C> operator*=(Array<T, C>& out,
        const Array<T, C>& img) {
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst *= src;
    };
    Transform(out, img, op);
    return out;
}
// In-place element-wise division; returns a copy of the result.
template<class T, int C> Array<T, C> operator/=(Array<T, C>& out,
        const Array<T, C>& img) {
    std::function<void(T&, const T&)> op = [=](T& dst, const T& src) {
        dst /= src;
    };
    Transform(out, img, op);
    return out;
}
// In-place scalar addition; returns a copy of the result.
template<class T, int C> Array<T, C> operator+=(Array<T, C>& out,
        const T& scalar) {
    std::function<void(T&)> op = [=](T& dst) {
        dst += scalar;
    };
    Transform(out, op);
    return out;
}
// In-place scalar subtraction; returns a copy of the result.
template<class T, int C> Array<T, C> operator-=(Array<T, C>& out,
        const T& scalar) {
    std::function<void(T&)> op = [=](T& dst) {
        dst -= scalar;
    };
    Transform(out, op);
    return out;
}
// In-place scalar multiplication; returns a copy of the result.
template<class T, int C> Array<T, C> operator*=(Array<T, C>& out,
        const T& scalar) {
    std::function<void(T&)> op = [=](T& dst) {
        dst *= scalar;
    };
    Transform(out, op);
    return out;
}
// In-place scalar division; returns a copy of the result.
template<class T, int C> Array<T, C> operator/=(Array<T, C>& out,
        const T& scalar) {
    std::function<void(T&)> op = [=](T& dst) {
        dst /= scalar;
    };
    Transform(out, op);
    return out;
}
// Element-wise sum of two arrays.
template<class T, int C> Array<T, C> operator+(const Array<T, C>& img1,
        const Array<T, C>& img2) {
    Array<T, C> out(img1.size());
    std::function<void(T&, const T&, const T&)> op =
            [=](T& dst, const T& lhs, const T& rhs) {
                dst = lhs + rhs;
            };
    Transform(out, img1, img2, op);
    return out;
}
// Element-wise difference of two arrays.
template<class T, int C> Array<T, C> operator-(const Array<T, C>& img1,
        const Array<T, C>& img2) {
    Array<T, C> out(img1.size());
    std::function<void(T&, const T&, const T&)> op =
            [=](T& dst, const T& lhs, const T& rhs) {
                dst = lhs - rhs;
            };
    Transform(out, img1, img2, op);
    return out;
}
// Element-wise product of two arrays.
template<class T, int C> Array<T, C> operator*(const Array<T, C>& img1,
        const Array<T, C>& img2) {
    Array<T, C> out(img1.size());
    std::function<void(T&, const T&, const T&)> op =
            [=](T& dst, const T& lhs, const T& rhs) {
                dst = lhs * rhs;
            };
    Transform(out, img1, img2, op);
    return out;
}
// Element-wise quotient of two arrays.
template<class T, int C> Array<T, C> operator/(const Array<T, C>& img1,
        const Array<T, C>& img2) {
    Array<T, C> out(img1.size());
    std::function<void(T&, const T&, const T&)> op =
            [=](T& dst, const T& lhs, const T& rhs) {
                dst = lhs / rhs;
            };
    Transform(out, img1, img2, op);
    return out;
}
// Inner product of a and b, accumulated in double precision; throws
// std::runtime_error on a size mismatch.
template<class T, int C> double dot(const Array<T, C>& a,
        const Array<T, C>& b) {
    if (a.size() != b.size())
        throw std::runtime_error(
                MakeString() << "Array dimensions do not match. " << a.size()
                        << "!=" << b.size());
    const size_t count = a.size();
    double sum = 0.0;
#pragma omp parallel for reduction(+:sum)
    for (int idx = 0; idx < (int) count; idx++) {
        sum += a[idx] * b[idx];
    }
    return sum;
}
// Sum of squared elements (squared Euclidean norm).
template<class T, int C> T lengthSqr(const Array<T, C>& a) {
    const size_t count = a.size();
    T total(0);
#pragma omp parallel for reduction(+:total)
    for (int idx = 0; idx < (int) count; idx++) {
        total += a[idx] * a[idx];
    }
    return total;
}
// Squared Euclidean distance between a and b.
template<class T, int C> T distanceSqr(const Array<T, C>& a, const Array<T, C>& b) {
    const size_t count = a.size();
    T total(0);
#pragma omp parallel for reduction(+:total)
    for (int idx = 0; idx < (int) count; idx++) {
        const T diff = a[idx] - b[idx];
        total += diff * diff;
    }
    return total;
}
// L1 (Manhattan) distance between a and b.
template<class T, int C> T distanceL1(const Array<T, C>& a, const Array<T, C>& b) {
    const size_t count = a.size();
    T total(0);
#pragma omp parallel for reduction(+:total)
    for (int idx = 0; idx < (int) count; idx++) {
        total += std::abs(a[idx] - b[idx]);
    }
    return total;
}
// Euclidean distance between a and b.
template<class T, int C> T distance(const Array<T, C>& a, const Array<T, C>& b) {
    const T squared = distanceSqr(a, b);
    return std::sqrt(squared);
}
// Largest element of a.  (Bug fix: the accumulator was seeded with
// numeric_limits<T>::min(), which for floating-point T is the smallest
// positive value, so an all-negative array returned that bogus seed; it is
// now seeded with numeric_limits<T>::lowest().)
template<class T, int C> T max(const Array<T, C>& a) {
    size_t sz = a.size();
    T tmp(std::numeric_limits<T>::lowest());
    //#pragma omp parallel for reduction(max:tmp)
    for (int i = 0; i < (int) sz; i++) {
        if (a[i] > tmp)
            tmp = a[i];
    }
    return tmp;
}
// Smallest element of a.
template<class T, int C> T min(const Array<T, C>& a) {
    const size_t count = a.size();
    T best(std::numeric_limits<T>::max());
    //#pragma omp parallel for reduction(min:best)
    for (int idx = 0; idx < (int) count; idx++) {
        best = (a[idx] < best) ? a[idx] : best;
    }
    return best;
}
// Euclidean norm of a.
template<class T, int C> T length(const Array<T, C>& a) {
    const T squared = lengthSqr(a);
    return std::sqrt(squared);
}
}
;
#endif /* INCLUDE_ALLOYARRAY_H_ */
|
opencl_agilekeychain_fmt_plug.c | /* 1Password Agile Keychain cracker patch for JtR. Hacked together during
* July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> and
* Copyright (c) 2012 Dhiru Kholia <dhiru.kholia at gmail.com>, and it is
* hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* This software is based on "agilekeychain" project but no actual code is
* borrowed from it.
*
* "agilekeychain" project is at https://bitbucket.org/gwik/agilekeychain
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_agilekeychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_agilekeychain);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "misc.h"
#include "aes.h"
#include "common-opencl.h"
#include "options.h"
#include "jumbo.h"
#define FORMAT_LABEL "agilekeychain-opencl"
#define FORMAT_NAME "1Password Agile Keychain"
#define FORMAT_TAG "$agilekeychain$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
#define SALTLEN 8
#define CTLEN 1040
/* Candidate password as transferred to the OpenCL device. */
typedef struct {
	uint32_t length;              /* number of valid bytes in v */
	uint8_t v[PLAINTEXT_LENGTH];  /* password bytes (not NUL-terminated) */
} keychain_password;
/* 16-byte derived key returned from the device as four 32-bit words. */
typedef struct {
	uint32_t v[16/4];
} keychain_hash;
/* PBKDF2-SHA1 parameters passed to the OpenCL kernel. */
typedef struct {
	uint32_t iterations;  /* PBKDF2 iteration count */
	uint32_t outlen;      /* requested derived-key length in bytes */
	uint32_t skip_bytes;  /* presumably an offset into the PBKDF2 output
	                         stream -- verify against the kernel */
	uint8_t length;       /* number of valid salt bytes (SALTLEN is 8) */
	uint8_t salt[64];     /* salt buffer */
} keychain_salt;
static int *cracked;     /* per-candidate crack flags for the current batch */
static int any_cracked;  /* nonzero once any candidate in the batch cracks */
static struct fmt_tests keychain_tests[] = {
{"$agilekeychain$2*1000*8*7146eaa1cca395e5*1040*e7eb81496717d35f12b83024bb055dec00ea82843886cbb8d0d77302a85d89b1d2c0b5b8275dca44c168cba310344be6eea3a79d559d0846a9501f4a012d32b655047673ef66215fc2eb4e944a9856130ee7cd44523017bbbe2957e6a81d1fd128434e7b83b49b8a014a3e413a1d76b109746468070f03f19d361a21c712ef88e05b04f8359f6dd96c1c4487ea2c9df22ea9029e9bc8406d37850a5ead03062283a42218c134d05ba40cddfe46799c931291ec238ee4c11dc71d2b7e018617d4a2bf95a0c3c1f98ea14f886d94ee2a65871418c7c237f1fe52d3e176f8ddab6dfd4bc039b6af36ab1bc9981689c391e71703e31979f732110b84d5fccccf59c918dfcf848fcd80c6da62ced6e231497b9cbef22d5edca439888556bae5e7b05571ac34ea54fafc03fb93e4bc17264e50a1d04b688fcc8bc715dd237086c2537c32de34bbb8a29de0208800af2a9b561551ae6561099beb61045f22dbe871fab5350e40577dd58b4c8fb1232f3f85b8d2e028e5535fd131988a5df4c0408929b8eac6d751dcc698aa1d79603251d90a216ae5e28bffc0610f61fefe0a23148dcc65ab88b117dd3b8d311157424867eb0261b8b8c5b11def85d434dd4c6dc7036822a279a77ec640b28da164bea7abf8b634ba0e4a13d9a31fdcfebbdbe53adcdf2564d656e64923f76bc2619428abdb0056ce20f47f3ece7d4d11dc55d2969684ca336725561cb27ce0504d57c88a2782daccefb7862b385d494ce70fef93d68e673b12a68ba5b8c93702be832d588ac935dbf0a7b332e42d1b6da5f87aed03498a37bb41fc78fcdbe8fe1f999fe756edf3a375beb54dd508ec45af07985f1430a105e552d9817106ae12d09906c4c28af575d270308a950d05c07da348f59571184088d46bbef3e7a2ad03713e90b435547b23f340f0f5d00149838d9919d40dac9b337920c7e577647fe4e2811f05b8e888e3211d9987cf922883aa6e53a756e579f7dff91c297fcc5cda7d10344545f64099cfd2f8fd59ee5c580ca97cf8b17e0222b764df25a2a52b81ee9db41b3c296fcea1203b367e55d321c3504aeda8913b0cae106ccf736991030088d581468264b8486968e868a44172ad904d97e3e52e8370aaf52732e6ee6cc46eb33a901afc6b7c687b8f6ce0b2b4cdfe19c7139615195a052051becf39383ab83699a383a26f8a36c78887fe27ea7588c0ea21a27357ff9923a3d23ca2fb04ad671b63f8a8ec9b7fc969d3bece0f5ff19a40bc327b9905a6de2193ffe3aa1997e9266205d083776e3b94869164abcdb88d64b8ee5465f7165b75e1632abd364a24bb1426889955b8f0354f75c6fb40e254f7de53d8ef7fee9644bf2ebccd934a72bb1c
c9c19d354d66996acbddd60d1241657359d9074a4b313b21af2ee4f10cf20f4122a5fad4ee4f37a682ffb7234bea61985d1ad130bfb9f4714461fb574dbf851c*1000*8*c05f3bc3e7f3cad7*1040*f3e3d091b64da1529b04b2795898b717faad59f7dae4bda25e6e267c28a56a7702e51991b2a3fb034cdda2d9bfd531dfd2c3af00f39fdfe8bcbdde02ab790415bcf071d133b15f647f55ff512730ae4914ce20b72184c827f6350ac768b00c9eab0e3322e084bb3e9e9439a10030950f5504dcc4f7ba614b27fde99bd0d743a58341e90ec313395486eb8068df205b7bdf25134ed97dd2e2883d7eb3e63b659602ada765084a69d7ed8fc55b60aa67718cc9e5bf31ab8f3029b32a4b001071848d2b76b5f4b921d2169ca287e9e78ecd904d040c817c7c7cde4ba8510b462e139c16519962ca0adb7d5f89d431cd4541a9a7aaec8d799697f4d3947d87884bed32ada13db725c72ab6450ac8fe989a94917cca784bcf6ffbe756f19d4e8897e0f80d8c318e13e5b30fc356646aaf038a952b0781f12dfef1f4bd6922ae05a573eeff4dbb064cfbb0fd62962a6a53a8de308da2b8e83baebfe261cb127f874a5eff3f05cda123ab2ba559cf444ce33b6845f4c902733b8982044151a8aa1859769082ade5928f2d4f616ce972ae8dde1f2be37d496ad16057008dfe678c75cbdc53db25ed311edbcf8b2a73bcd2809f6bd1d389aaeed82a75fa15676d08aa5390efdc189c180be6a52ec5a7371304d26e477039197671377d1ea3d6ee41e68a42348a4fe9a1d2400eaeba8ed0a7419b9694d780456d96378c00318a5be0f41afa887476b3bebb7cf30d61ca8fc77de35671a3053a517aa39444e01e1752da3146dc97eec5849d6f025c3d4bc6e0499b901f629d8a081ad35ed33602cbef5e9a68f090170fcc1f285eb094e3dc619740a067fd2aeeb20abbb17926c3ad097f3f0bad4de540d1829a985cd7e700100622ec47da046071c11a1597e5f093268b4ed79ffcf2450b9ba2b649b932fbce912bdb4da010581bd9c731be792c8f75177f6c8c4e1756d63a1491a8aae4bb11beeca118e7d08073b500dd82b81e4bdbeb15625afca8f1c8e06b2360da972587516ef62e91d1d9aad90e62226d53363bff318f5af21f69c234731ac22b09506a1b807d2366e88905668d960c7963daa93046e9a56db1d7a437e9a37aa7a2945197265478b264ec14d383030ef73504fd26d4be9e72ebddb14a00bf6bd66a3adaa1d17cada378a2b0bc852f961af52333f7966f8a60738dfd47e79ce537082f187117ffd31f54f53356b671154dfa245671c4cd054c1a8d303a202fccfae6d3f9e3646838cef38703b5e660b5ce7679f5898d801908f90092dbec335c98e4002041287fe9bfa7d7828a29ab24
0ec2cedc9fa12cfd7c3ef7b61dad4fbf2ef9c0a904dbde1b3792fb5178607608dc9fc2fbc85addf89fa3df94317e729810b508356b5bb176cdb022afb0ec5eeff4d5081b66733d1be1b54cc4f080bfc33187663b5ab185472b35dc8812e201472e6af376c43ee23aa2db6cd04bddd79b99b0c28c48a5ae", "openwall"},
{"$agilekeychain$1*1000*8*54434b3047723444*1040*316539685a36617546544a61466e35743970356559624464304467394a4a41615459594a6b66454c5462417a7a694b5751474e4748595036344f3945374b414b676b6b7278673658794e63734a316c48656b496a3156346a544c6861797537347032466b4d6b416d31704a6b5063547a44703152544f72696e6e38347732597672774f6476414c70346462595a7678656b6e5958716b7a61746d5874514e575965564735627a437578584e4a573050567939413073306c377a4d726e6d576a6655424455394f4934696c48454f4d536e635567393950686d4171364f76747749446130454c6d74783069704d30456d45374f56736e486a5534667877327a526e52596e55454452393544437042646e6739355938714836584968664c4d7a726a4f63544c6858385141464c71565463664270493761664d633055447879613169456a72664479346438305641417054754775477a475266766c4774543668673848624d31636c37624e73743549634457655375507138535139396c4c39364c4f6f757a43305535586161364b47676a61713971394459526a78744e547459797a6a57715a3575534364487a4430306d4e4e39483277674c733238726463616d4f5146467957374234727252774b6d6161664b6d67414d5854496444665848684c376c6c776d47477a4b57566d5a3646346e775441446f3659745038646d336b6370494d50676742797a41325630716e794833793237494152496477556e4d6c4751497367346672635364486e6e71504f6e6264575953584462586c6e573947347a567163535333366e3253504d65656b45483841544f6952384d6170724471706c4a307863713653707265624f544a4d5139377562454a334b776e4879746a37704e37694557484d69696d436f484973613443754d484b4f51484833545a364654694a6d31783061665536796c444f7257666964397243444f684d305a324c6b75693953716664354b435963703559354978757a64354a755158394136663744435a674e4c73484a7935737a707739724c783077316631637349757a6d696252576244396a537730593143633348385a775734534b646569684f634f4c35323364734b7179625750364b76344a4a56626c4f727069366f575a386432745375684c464e42643173445a6a50745743696e666a4458325058644d57654c596d326f5763516a7951524a566372354d4d58435877765172596b734c59354476455156746d75504830444a4e47624e31524f4d544b4a6b4d675835305a7a56736758794c475057714e78496452725269484c75424f4d6d793550677277727453597045566e304c5642764c5a6732504c7a4e71
584c4c67634979637369554a3446497655795a78583547306b365a4e337477786c7961796b4d787463796971596f516fcb3584235d7ecde5f8b7bc2b8f1e9e2e*46c3b75f6e4cf139e92f683f32107271", "123"},
{"$agilekeychain$1*1000*8*7a697868444e7458*1040*773954704874444d4d523043546b44375135544f74675a754532624a45794848305949436e4e724d336c524c39316247426a7843317131614152736d50724c6474586a4d4d445954786c31376d363155437130777a414d36586c7045555457424a5a436a657541456742417961654472745a73576e4b7a7a344d547043567846526655524b4339573631756f3850465a3878306b7176644c4253787071764c58376e716a50674f526d4a4e4b546e3359575175614b304a3964756f756935675a77544f4e6770654855776f79553465786e41364d6376496b7651624762424d62756746796a6753514c37793069783869683773454c533559365946584f545246616d48495730464e634d42466e51367856797a4368517335674a755972434b545944633270764e54775879563542776675386b6e4462506b743138694a756d63447134745361526a32373167366e787375514e346a73574e77796b4b49376d3677653448754c364b5a41514633626e71786130634458544e484a436551386e7679304b786d73346f774a383268665167596b466e39317a307269714434546d4d6173416e344b6a74455a584846526a6659746742504262495958386336755241386c496633417666696d7a5036425745757461736b684574794a5230436d50466d4b536375764674674562315679766a43453077356e614b476d345849395a726b7037626153496b6a66634f355261795157645941487731516f564c6764516d4e3074394b3839526341626f6b6b38324465497068624553646f4177786e6f68347779523338394f4e6561315271635236374d424d695978304b336b4a6966776e74614f4b43483237434b596a6630774e79394a4b7153714a48616b4b364455596a454b31433767786a72303450706d44666373574c5a61324f335852474b756c456b76483349754e3156654f417342324d6f75346d4b78774e43424863566e344c4c6c6c6d4e446b617550415a6f3337764f55484b4156344d4769336267344f4737794c354c5567636a565a6b7369616730383377744d69513431333032305a4a3747794944714d67396a5651444132424e79507a34726d346c333552757a764b6c543073437562534376714f346a5939784a546f683358517348623378716677313231383261685357743236455a6a6b6674365870554642386436574c374430635177347278736a744a6e463530756365684c7779497557366550356936514e704e4863353863437165397163496146794a726555714c623438543235396371416154326c66375276746e3550727453306b7042335961364239586c3359384b464865564e677636537234414e4d
6c55583867456376686e43646e6e776a6f656d7152613453725148503462744b4a334565714f6e624a774a65623258552fff2bf0505a0bc88b9cbc9073a74586*a6f6556c971bd3ad40b52751ba025713", ""},
{"$agilekeychain$1*1000*8*7a65613743636950*1040*524a397449393859696b4a576e437763716a574947544a6d306e32474442343355764a7a6948517a45686d7569636631514745347448424e4e6b32564239656a55596f724671547638736d4e66783949504b6f38746b6f49426d4d6b794c7a6d3077327639365a4b515934357774664a477247366b5539486135495863766845714146317458356b725a6a50376f726e55734b3136533756706a4b42516165656a50336e4558616450794f59506f4771347268454730784555485a4f5a4772526a76354f45417470616258375a386436474b366f7653583257335939516d4f5364446a414b674e467a31374f716d73516b3362795776305a414a314f63324d616a6c6472413939443879414c523733794c47467654734d7a6a4734733461674353357a4456527841486233646d446e797448696837377364784344704831784f6a5975666168626b5534796678576c59584d4b3448704a784a4f675a6d7672636b5a4b567071445a345a376648624b55414b7262694972384531336c7a6875725a6f44627571775361774b66417743336230614e4166564954334a6c3477666b4254374f747565394b32667266566d3263416a656c79416c45724b3035504a4e42307a33303632483466664272705765415a4f3552416a36544e5a54415a5976666a4b53675a68493071394a6563426964544a4f564d304a773976394944444339516e564a78587539366974586c4f6132717937354c554b65384b7638585132596832417a5271314e4b5653766d4d50506d3554463762763961554e45695a51436e79504f6e7146617a755231373574455365305446624c636450424a43526a49384b32365967496a734c324e525574526e36714c533065694f536c6c37795a456945476d4a6e327262646942416c485046616e384e4d7869427571777355714e7638305267537752726245696c734d68664b53793836684b39445a716b47546d4b59747176474c6b6a6d52513368796b367a356449706c64385541614236546e426a6b4f64766d33493972763941765a71776345686b734c594a7254446c796f46444b6d557441305a636b414e437245587a63487a30304c50564e4e73694d634d5a6f4f74414534424f53685879374e62545734487a555054774a7056686f6a7453666a664e696d354548345631374c61396862586659666332304e465a5678656a304b4d59586d586547634d67474c6d31794a4b546473474c755a697579625779503259726d6d5248544f6f704b575046556e3438415a48474168396d787136327230367248774e73493439693049794b3765314b4f74547265556c564b6e6d594a5959355a7476334b546f7537
5a6a676c755a557a39744b54747745583948314a37366e6c6d5a53345079555856696438336876596141617a394438711ee66b990b013609582733309b01df00*444f4656a5ec58e8a75204fb25fd5ae5", "PASSWORD"},
{NULL}
};
/* Parsed "$agilekeychain$" hash: room for up to two keychain entries,
 * although the rest of this file only ever uses entry [0]. */
static struct custom_salt {
unsigned int nkeys; /* number of key entries encoded in the hash */
unsigned int iterations[2]; /* PBKDF2 iteration count per entry */
unsigned int saltlen[2]; /* salt byte length per entry */
unsigned char salt[2][SALTLEN];
unsigned int ctlen[2]; /* ciphertext byte length per entry */
unsigned char ct[2][CTLEN];
} *cur_salt;
static cl_int cl_error;
static keychain_password *inbuffer;
static keychain_hash *outbuffer;
static keychain_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
size_t insize, outsize, settingsize, cracked_size;
static struct fmt_main *self;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* Per-phase labels used by the autotuner's timing report
 * (host->GPU transfer, kernel run, GPU->host transfer). */
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Ask the autotune library for the largest usable local work-group
 * size for crypt_kernel on the selected device. */
static size_t get_task_max_work_group_size()
{
	size_t max_size;

	max_size = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	return max_size;
}
/* Allocate host buffers and GPU memory objects sized for `gws` candidates
 * and bind the GPU buffers to the kernel's three arguments
 * (0 = passwords in, 1 = derived keys out, 2 = salt/settings). */
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(keychain_password) * gws;
outsize = sizeof(keychain_hash) * gws;
settingsize = sizeof(keychain_salt);
cracked_size = sizeof(*cracked) * gws;
/* Host-side mirrors of the GPU buffers plus the per-candidate crack flags. */
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
cracked = mem_calloc(1, cracked_size);
/// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Release GPU memory objects and free the host-side mirrors.
 * `cracked` is used as the "allocated" flag; presumably MEM_FREE also
 * NULLs it so a repeated call is a no-op — confirm against misc.h. */
static void release_clobj(void)
{
if (cracked) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(cracked);
}
}
/* Tear down everything reset() built: buffers, kernel and program.
 * Only acts if the autotuner actually ran. */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
	autotuned--;
}
/* One-time format init: remember the format descriptor and prepare the
 * selected OpenCL device. Program/kernel build is deferred to reset(). */
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
/* Build the PBKDF2-HMAC-SHA1 kernel on first use and run the autotuner to
 * pick work sizes. Subsequent calls are no-ops while `autotuned` is set. */
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
/* Bake buffer sizes into the kernel as compile-time constants. */
snprintf(build_opts, sizeof(build_opts),
"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
PLAINTEXT_LENGTH,
(int)sizeof(currentsalt.salt),
(int)sizeof(outbuffer->v));
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
create_clobj, release_clobj,
sizeof(keychain_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 1000);
}
}
/*
 * Syntactic check of one "$agilekeychain$..." line.
 * Layout after the tag, '*'-separated: nkeys, iterations, salt length,
 * salt (hex), ciphertext length, ciphertext (hex). Only the first key
 * entry's fields are inspected, matching get_salt()/crypt_all().
 * Returns 1 if the line looks well formed, 0 otherwise.
 *
 * Fix: nkeys was only checked with "> 2", so a malformed hash with
 * 0 keys was accepted; now require 1 <= nkeys <= 2.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	int ctlen, extra;
	int saltlen;
	int nkeys;
	char *p;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* nkeys */
		goto err;
	if (!isdec(p))
		goto err;
	nkeys = atoi(p);
	if (nkeys < 1 || nkeys > 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	if (!isdec(p))
		goto err;
	saltlen = atoi(p);
	if (saltlen > SALTLEN)	/* must fit the fixed-size salt buffer */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p, &extra) != saltlen * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ct length */
		goto err;
	if (!isdec(p))
		goto err;
	ctlen = atoi(p);
	if (ctlen > CTLEN)	/* must fit the fixed-size ciphertext buffer */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ciphertext */
		goto err;
	if (hexlenl(p, &extra) != ctlen * 2 || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse a validated ciphertext line into the static custom_salt record.
 * NOTE(review): only the first keychain entry is decoded, even when
 * nkeys == 2 — the second entry's fields are never read; confirm this
 * mirrors the CPU format's behavior.
 * Returns a pointer to static storage, as the fmt interface expects.
 */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN; /* skip over "$agilekeychain$" */
p = strtokm(ctcopy, "*");
cs.nkeys = atoi(p);
p = strtokm(NULL, "*");
cs.iterations[0] = atoi(p);
p = strtokm(NULL, "*");
cs.saltlen[0] = atoi(p);
p = strtokm(NULL, "*");
/* Decode the hex salt into raw bytes. */
for (i = 0; i < cs.saltlen[0]; i++)
cs.salt[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.ctlen[0] = atoi(p);
p = strtokm(NULL, "*");
/* Decode the hex ciphertext into raw bytes. */
for (i = 0; i < cs.ctlen[0]; i++)
cs.ct[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
/*
 * Make `salt` the active salt: fill the GPU-side settings record from the
 * first keychain entry and enqueue it (non-blocking) to the device.
 *
 * Fix: the fourth clEnqueueWriteBuffer argument had been mangled to
 * "¤tsalt" (mojibake of "&currentsalt"), which does not compile;
 * restored the address-of expression.
 */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->saltlen[0]);
	currentsalt.length = cur_salt->saltlen[0];
	currentsalt.iterations = cur_salt->iterations[0];
	currentsalt.outlen = 16;	/* derive a 128-bit AES key */
	currentsalt.skip_bytes = 0;
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy salt to gpu");
}
#undef set_key
/* Store one candidate password into the host input buffer at `index`,
 * silently truncating to PLAINTEXT_LENGTH bytes. */
static void set_key(char *key, int index)
{
	size_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(inbuffer[index].v, key, len);
	inbuffer[index].length = len;
}
/* Return the candidate stored at `index` as a NUL-terminated string.
 * Uses a static buffer, as the fmt interface allows. */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	const uint8_t n = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, n);
	out[n] = 0;
	return out;
}
/*
 * Cheap verification of a derived 16-byte key against one keychain blob:
 * decrypt only the final AES-CBC block (its IV is the preceding ciphertext
 * block) and test for valid PKCS padding, then sanity-check the implied
 * key size. Returns 0 on a plausible match, -1 otherwise.
 *
 * NOTE(review): only the last 16 bytes of `out` are written before
 * check_pkcs_pad() is called on the whole CTLEN buffer — this assumes the
 * pad check inspects trailing bytes only; confirm against its definition.
 */
static int akcdecrypt(unsigned char *derived_key, unsigned char *data)
{
unsigned char out[CTLEN];
int n, key_size;
AES_KEY akey;
unsigned char iv[16];
/* IV for the last block = second-to-last ciphertext block. */
memcpy(iv, data + CTLEN - 32, 16);
if (AES_set_decrypt_key(derived_key, 128, &akey) < 0)
fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
AES_cbc_encrypt(data + CTLEN - 16, out + CTLEN - 16, 16, &akey, iv, AES_DECRYPT);
n = check_pkcs_pad(out, CTLEN, 16);
if (n < 0)
return -1;
/* presumably n is in bits here, giving a byte key_size — TODO confirm units */
key_size = n / 8;
if (key_size != 128 && key_size != 192 && key_size != 256)
// "invalid key size"
return -1;
return 0;
}
/*
 * Process one batch of candidates: upload passwords, run the PBKDF2-SHA1
 * kernel, read back the derived keys, then verify each on the CPU (in
 * parallel under OpenMP) with the AES padding check. Returns the number
 * of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
/* Reset per-batch crack flags from the previous round. */
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
/// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
/// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]), "Run kernel");
/// Read the result back
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");
/* While autotuning, only GPU timing matters; skip the CPU-side check. */
if (ocl_autotune_running)
return count;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
if (!akcdecrypt((unsigned char*)outbuffer[index].v, cur_salt->ct[0]))
{
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
return count;
}
/* Did the last crypt_all() batch produce any hit at all? */
static int cmp_all(void *binary, int count)
{
	return any_cracked != 0;
}
/* Was the candidate at `index` flagged as cracked? */
static int cmp_one(void *binary, int index)
{
	return cracked[index] != 0;
}
/* crypt_all() already did the full padding verification; nothing further
 * to check here (format is registered FMT_NOT_EXACT). */
static int cmp_exact(char *source, int index)
{
return 1;
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations[0];
}
/*
 * Format descriptor registered with the John the Ripper core.
 * First sub-struct: static parameters; second: the method table.
 */
struct fmt_main fmt_opencl_agilekeychain = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
/* FMT_NOT_EXACT: cmp_exact cannot fully prove a match. */
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
{
"iteration count",
},
{ FORMAT_TAG },
keychain_tests
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{
/* tunable-cost reporting hook */
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
atomic-2.c | /* { dg-do compile } */
float x, y;
/* Compile-only test: every update form accepted by "#pragma omp atomic"
 * applied to a float (inc/dec, compound assignment with +,-,*,/). */
void f1(void)
{
#pragma omp atomic
x++;
#pragma omp atomic
x--;
#pragma omp atomic
++x;
#pragma omp atomic
--x;
#pragma omp atomic
x += 1;
#pragma omp atomic
x -= y;
#pragma omp atomic
x *= 3;
#pragma omp atomic
x /= 3;
}
|
17_multiply_spamm.c | #include "config.h"
#include "test.h"
#include <spamm.h>
#include <spamm_blas.h>
#include <getopt.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define REL_TOLERANCE 1e-8
#define TEST_ABS_TOLERANCE 2e-8
#define TEST_REL_TOLERANCE 2e-6
/* Map a (row i, column j) pair to the linear offset of an M-row,
 * column-major matrix. N is accepted only for signature symmetry and
 * does not enter the formula. */
unsigned int
matrix_index (const unsigned int i,
    const unsigned int j,
    const unsigned int M,
    const unsigned int N)
{
  return j*M + i;
}
int
main (int argc, char **argv)
{
int result = 0;
unsigned int dim;
unsigned int i[2];
#if !defined(DGEMM) && !defined(SGEMM)
unsigned int k;
#endif
unsigned int N[] = { 513, 513 };
unsigned int chunk_tier = 5;
/* Boolean program parameters. */
short use_linear_tree = 0;
short use_sgemm = 0;
short use_diagonal = 0;
short verify_result = 1;
short check_matrices = 1;
short random_matrix = 1;
short print_debug = 0;
short null_C = 0;
float gamma = 1.0;
double alpha = 1.2;
double beta = 0.5;
float alpha_float = alpha;
float beta_float = beta;
float tolerance = 0.0;
double *A_dense;
double *B_dense;
double *C_dense;
float *A_float;
float *B_float;
float *C_float;
struct spamm_matrix_t *A;
struct spamm_matrix_t *B;
struct spamm_matrix_t *C;
unsigned int max_i[] = { 0, 0 };
unsigned int max_rel_i[] = { 0, 0 };
unsigned int max_float_i[] = { 0, 0 };
double max_diff;
double max_rel_diff;
double max_diff_float;
struct spamm_timer_t *timer;
char *timer_string;
time_t random_seed = time(NULL);
enum matrix_t matrix_type = full;
double flop;
double memop;
int option_index;
int parse_result;
char *short_options = "hN:la:b:t:c:rds1g:vxnm:2:";
static struct option long_options[] = {
{ "help", no_argument, NULL, 'h' },
{ "N", required_argument, NULL, 'N' },
{ "linear", no_argument, NULL, 'l' },
{ "alpha", required_argument, NULL, 'a' },
{ "beta", required_argument, NULL, 'b' },
{ "tolerance", required_argument, NULL, 't' },
{ "chunk", required_argument, NULL, 'c' },
{ "no-random", no_argument, NULL, 'r' },
{ "debug", no_argument, NULL, 'd' },
{ "sgemm", no_argument, NULL, 's' },
{ "diagonal", no_argument, NULL, '1' },
{ "gamma", required_argument, NULL, 'g' },
{ "verify", no_argument, NULL, 'v' },
{ "check", no_argument, NULL, 'x' },
{ "nullC", no_argument, NULL, 'n' },
{ "matrix-type", required_argument, NULL, 'm' },
{ "seed", required_argument, NULL, '2' },
{ NULL, 0, NULL, 0 }
};
while(1)
{
parse_result = getopt_long(argc, argv, short_options, long_options, &option_index);
if(parse_result == -1) { break; }
switch(parse_result)
{
case 'h':
printf("Usage:\n");
printf("\n");
printf("{ -N | --N } N Set N\n");
printf("{ -l | --linear } Use a linear tier\n");
printf("{ -a | --alpha } alpha Set alpha\n");
printf("{ -b | --beta } beta Set beta\n");
printf("{ -t | --tolerance } tau Multiply with tolerance tau\n");
printf("{ -c | --chunk } c Set chunk tier to c\n");
printf("{ -r | --no-random } Do not create random matrix\n");
printf("{ -d | --debug } Print matrices\n");
printf("{ -s | --sgemm } Use sgemm\n");
printf("{ -1 | --diagonal } Create diagonally dominant matrices\n");
printf("{ -g | --gamma } g Set decay for diagonal to g\n");
printf("{ -v | --verify } Verify result\n");
printf("{ -x | --check } Check matrices\n");
printf("{ -n | --nullC } Initialize C as empty matrix\n");
printf("{ -m | --matrix-type } TYPE Matrix type: %s\n", print_matrix_types());
printf("{ --seed } SEED Seed random number generator with SEED\n");
exit(0);
break;
case 'N':
for(dim = 0; dim < 2; dim++)
{
N[dim] = strtol(optarg, NULL, 10);
}
break;
case 'l':
use_linear_tree = 1;
break;
case 'a':
alpha = strtod(optarg, NULL);
alpha_float = alpha;
break;
case 'b':
beta = strtod(optarg, NULL);
beta_float = beta;
break;
case 't':
tolerance = strtod(optarg, NULL);
break;
case 'c':
chunk_tier = strtol(optarg, NULL, 10);
break;
case 'r':
random_matrix = 0;
break;
case 'd':
print_debug = 1;
break;
case 's':
use_sgemm = 1;
break;
case '1':
use_diagonal = 1;
break;
case 'g':
gamma = strtof(optarg, NULL);
break;
case 'v':
verify_result = (verify_result+1)%2;
break;
case 'x':
check_matrices = (check_matrices+1)%2;
break;
case 'n':
null_C = 1;
break;
case 'm':
matrix_type = parse_matrix_type(optarg);
break;
case '2':
random_seed = strtol(optarg, NULL, 10);
break;
default:
printf("unknown option\n");
break;
}
}
/* Initialize random number generator. */
srand(random_seed);
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
printf("running on %d OpenMP threads\n", omp_get_num_threads());
}
}
#else
printf("running serial version\n");
#endif
A_dense = (double*) malloc(sizeof(double)*N[0]*N[1]);
B_dense = (double*) malloc(sizeof(double)*N[0]*N[1]);
C_dense = (double*) malloc(sizeof(double)*N[0]*N[1]);
A_float = (float*) malloc(sizeof(float)*N[0]*N[1]);
B_float = (float*) malloc(sizeof(float)*N[0]*N[1]);
C_float = (float*) malloc(sizeof(float)*N[0]*N[1]);
printf("creating random matrices (%ux%u), gamma = %e... ", N[0], N[1], gamma);
fflush(stdout);
for(i[0] = 0; i[0] < N[0]; i[0]++)
{
if(use_diagonal)
{
if(random_matrix)
{
A_dense[matrix_index(i[0], i[0], N[0], N[1])] = (float) rand()/(float) RAND_MAX;
B_dense[matrix_index(i[0], i[0], N[0], N[1])] = (float) rand()/(float) RAND_MAX;
if(!null_C)
{
C_dense[matrix_index(i[0], i[0], N[0], N[1])] = (float) rand()/(float) RAND_MAX;
}
}
else
{
A_dense[matrix_index(i[0], i[0], N[0], N[1])] = i[0]*N[1]+i[0]+1;
B_dense[matrix_index(i[0], i[0], N[0], N[1])] = i[0]*N[1]+i[0]+1;
if(!null_C)
{
C_dense[matrix_index(i[0], i[0], N[0], N[1])] = i[0]*N[1]+i[0]+1;
}
}
}
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
if(use_diagonal)
{
if(i[0] != i[1])
{
A_dense[matrix_index(i[0], i[1], N[0], N[1])] = A_dense[matrix_index(i[0], i[0], N[0], N[1])]
*(fabs((float) i[0]-(float) i[1]) > gamma ? expf(-fabsf((float) i[0]-(float) i[1])/gamma) : 1);
B_dense[matrix_index(i[0], i[1], N[0], N[1])] = B_dense[matrix_index(i[0], i[0], N[0], N[1])]
*(fabs((float) i[0]-(float) i[1]) > gamma ? expf(-fabsf((float) i[0]-(float) i[1])/gamma) : 1);
if(!null_C)
{
C_dense[matrix_index(i[0], i[1], N[0], N[1])] = C_dense[matrix_index(i[0], i[0], N[0], N[1])]
*(fabs((float) i[0]-(float) i[1]) > gamma ? expf(-fabsf((float) i[0]-(float) i[1])/gamma) : 1);
}
}
}
else
{
if(random_matrix)
{
A_dense[matrix_index(i[0], i[1], N[0], N[1])] = (float) rand()/(float) RAND_MAX;
B_dense[matrix_index(i[0], i[1], N[0], N[1])] = (float) rand()/(float) RAND_MAX;
if(!null_C)
{
C_dense[matrix_index(i[0], i[1], N[0], N[1])] = (float) rand()/(float) RAND_MAX;
}
}
else
{
A_dense[matrix_index(i[0], i[1], N[0], N[1])] = i[0]*N[1]+i[1];
B_dense[matrix_index(i[0], i[1], N[0], N[1])] = i[0]*N[1]+i[1];
if(!null_C)
{
C_dense[matrix_index(i[0], i[1], N[0], N[1])] = i[0]*N[1]+i[1];
}
}
}
}
}
for (i[0] = 0; i[0] < N[0]; i[0]++) {
for (i[1] = 0; i[1] < N[1]; i[1]++)
{
A_float[matrix_index(i[0], i[1], N[0], N[1])] = A_dense[matrix_index(i[0], i[1], N[0], N[1])];
B_float[matrix_index(i[0], i[1], N[0], N[1])] = B_dense[matrix_index(i[0], i[1], N[0], N[1])];
C_float[matrix_index(i[0], i[1], N[0], N[1])] = C_dense[matrix_index(i[0], i[1], N[0], N[1])];
}
}
printf("done\n");
printf("creating SpAMM matrices... ");
fflush(stdout);
A = spamm_convert_dense_to_spamm(2, N, chunk_tier, use_linear_tree, column_major, A_float);
B = spamm_convert_dense_to_spamm(2, N, chunk_tier, use_linear_tree, column_major, B_float);
if(null_C)
{
C = spamm_new(2, N, chunk_tier, use_linear_tree);
}
else
{
C = spamm_convert_dense_to_spamm(2, N, chunk_tier, use_linear_tree, column_major, C_float);
}
printf("done\n");
printf("multiply: alpha = %f, beta = %f, tolerance = %f, chunk_tier = %u, use_linear_tree = %u\n",
alpha, beta, tolerance, chunk_tier, use_linear_tree);
if(check_matrices)
{
printf("checking A... ");
if(spamm_check(A, REL_TOLERANCE) != SPAMM_OK)
{
SPAMM_FATAL("failed\n");
}
else
{
printf("ok\n");
}
printf("checking B... ");
if(spamm_check(B, REL_TOLERANCE) != SPAMM_OK)
{
SPAMM_FATAL("failed\n");
}
else
{
printf("ok\n");
}
printf("checking C... ");
if(spamm_check(C, REL_TOLERANCE) != SPAMM_OK)
{
SPAMM_FATAL("failed\n");
}
else
{
printf("ok\n");
}
}
if(print_debug)
{
printf("A_dense = zeros(%u,%u);\n", N[0], N[1]);
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
printf("A_dense(%u,%u) = %e;\n", i[0]+1, i[1]+1, A_dense[matrix_index(i[0], i[1], N[0], N[1])]);
}
}
printf("A =\n");
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
printf(" %5.2e", spamm_get(i, A));
}
printf("\n");
}
printf("B_dense = zeros(%u,%u);\n", N[0], N[1]);
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
printf("B_dense(%u,%u) = %e;\n", i[0]+1, i[1]+1, B_dense[matrix_index(i[0], i[1], N[0], N[1])]);
}
}
printf("B =\n");
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
printf(" %5.2e", spamm_get(i, B));
}
printf("\n");
}
printf("C_dense = zeros(%u,%u);\n", N[0], N[1]);
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
printf("C_dense(%u,%u) = %e;\n", i[0]+1, i[1]+1, C_dense[matrix_index(i[0], i[1], N[0], N[1])]);
}
}
printf("C =\n");
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
printf(" %5.2e", spamm_get(i, C));
}
printf("\n");
}
printf("A (tree)\n");
spamm_print_tree(A);
printf("B (tree)\n");
spamm_print_tree(B);
printf("C (tree)\n");
spamm_print_tree(C);
}
if(verify_result)
{
printf("multiplying reference... ");
fflush(stdout);
#ifdef DGEMM
DGEMM("N", "N", &N[0], &N[0], &N[0], &alpha, A_dense, &N[0], B_dense, &N[0], &beta, C_dense, &N[0]);
#else
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
C_dense[matrix_index(i[0], i[1], N[0], N[1])] *= beta;
for(k = 0; k < N[0]; k++)
{
C_dense[matrix_index(i[0], i[1], N[0], N[1])] += alpha*A_dense[matrix_index(i[0], k, N[0], N[1])]*B_dense[matrix_index(k, i[1], N[0], N[1])];
}
}
}
#endif
printf("done\n");
printf("multiplying sgemm... ");
fflush(stdout);
timer = spamm_timer_new();
spamm_timer_add_event(0x8000003b, timer);
spamm_timer_start(timer);
#ifdef SGEMM
SGEMM("N", "N", &N[0], &N[0], &N[0], &alpha_float, A_float, &N[0], B_float, &N[0], &beta_float, C_float, &N[0]);
#else
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
C_float[matrix_index(i[0], i[1], N[0], N[1])] *= beta;
for(k = 0; k < N[0]; k++)
{
C_float[matrix_index(i[0], i[1], N[0], N[1])] += alpha*A_float[matrix_index(i[0], k, N[0], N[1])]*B_float[matrix_index(k, i[1], N[0], N[1])];
}
}
}
#endif
spamm_timer_stop(timer);
timer_string = spamm_timer_get_string(timer);
printf("%s\n", timer_string);
free(timer_string);
spamm_timer_delete(&timer);
}
if(print_debug)
{
printf("C_ref_float = zeros(%u,%u);\n", N[0], N[1]);
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
printf("C_ref_float(%u,%u) = %e;\n", i[0]+1, i[1]+1, C_float[matrix_index(i[0], i[1], N[0], N[1])]);
}
}
printf("C_ref_dense = zeros(%u,%u);\n", N[0], N[1]);
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
printf("C_ref_dense(%u,%u) = %e;\n", i[0]+1, i[1]+1, C_dense[matrix_index(i[0], i[1], N[0], N[1])]);
}
}
}
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
printf("multiplying SpAMM, %d OpenMP threads... ", omp_get_num_threads());
}
}
#else
printf("multiplying SpAMM... ");
#endif
fflush(stdout);
flop = 0;
memop = 0;
timer = spamm_timer_new();
spamm_timer_add_event(0x8000003b, timer);
spamm_timer_start(timer);
spamm_multiply(tolerance, alpha, A, B, beta, C, (use_sgemm ? SGEMM : NULL), &flop, &memop);
spamm_timer_stop(timer);
timer_string = spamm_timer_get_string(timer);
printf("%s, %e flop, %e memop\n", timer_string, flop, memop);
free(timer_string);
spamm_timer_delete(&timer);
if(print_debug)
{
printf("C (tree)\n");
spamm_print_tree(C);
printf("C =\n");
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++)
{
printf(" %5.2e", spamm_get(i, C));
}
printf("\n");
}
}
if(check_matrices)
{
printf("checking C... ");
if(spamm_check(C, REL_TOLERANCE) != SPAMM_OK)
{
SPAMM_FATAL("failed\n");
}
else
{
printf("ok\n");
}
}
if(verify_result)
{
max_diff = 0;
max_rel_diff = 0;
max_diff_float = 0;
printf("verifying result... ");
fflush(stdout);
for(i[0] = 0; i[0] < N[0]; i[0]++) {
for(i[1] = 0; i[1] < N[1]; i[1]++) {
if(fabs(C_dense[matrix_index(i[0], i[1], N[0], N[1])]-spamm_get(i, C)) > max_diff)
{
max_diff = fabs(C_dense[matrix_index(i[0], i[1], N[0], N[1])]-spamm_get(i, C));
max_i[0] = i[0];
max_i[1] = i[1];
}
if(fabs(C_dense[matrix_index(i[0], i[1], N[0], N[1])]-C_float[matrix_index(i[0], i[1], N[0], N[1])]) > max_diff_float)
{
max_diff_float = fabs(C_dense[matrix_index(i[0], i[1], N[0], N[1])]-C_float[matrix_index(i[0], i[1], N[0], N[1])]);
max_float_i[0] = i[0];
max_float_i[1] = i[1];
}
if(C_dense[matrix_index(i[0], i[1], N[0], N[1])] != 0)
{
if(fabs((C_dense[matrix_index(i[0], i[1], N[0], N[1])]-spamm_get(i, C))/C_dense[matrix_index(i[0], i[1], N[0], N[1])]) > max_rel_diff)
{
max_rel_diff = fabs((C_dense[matrix_index(i[0], i[1], N[0], N[1])]-spamm_get(i, C))/C_dense[matrix_index(i[0], i[1], N[0], N[1])]);
max_rel_i[0] = i[0];
max_rel_i[1] = i[1];
}
}
}
}
printf("done\n");
printf("max float diff = %e, C_dense[%u][%u] = %e, C_float[%u][%u] = %e\n",
max_diff_float,
max_float_i[0], max_float_i[1], C_dense[matrix_index(max_float_i[0], max_float_i[1], N[0], N[1])],
max_float_i[0], max_float_i[1], C_float[matrix_index(max_float_i[0], max_float_i[1], N[0], N[1])]);
printf("max SpAMM diff = %e, rel. diff = %e, A[%u][%u] = %e, A_reference[%u][%u] = %e\n",
max_diff,
(C_dense[matrix_index(max_i[0], max_i[1], N[0], N[1])] != 0.0 ? max_diff/C_dense[matrix_index(max_i[0], max_i[1], N[0], N[1])] : 0.0),
max_i[0], max_i[1], spamm_get(max_i, C),
max_i[0], max_i[1], C_dense[matrix_index(max_i[0], max_i[1], N[0], N[1])]);
printf("max SpAMM rel. diff = %e, diff = %e, A[%u][%u] = %e, A_reference[%u][%u] = %e\n",
max_rel_diff,
fabs(C_dense[matrix_index(max_rel_i[0], max_rel_i[1], N[0], N[1])]-spamm_get(max_rel_i, C)),
max_rel_i[0], max_rel_i[1], spamm_get(max_rel_i, C),
max_rel_i[0], max_rel_i[1], C_dense[matrix_index(max_rel_i[0], max_rel_i[1], N[0], N[1])]);
if(max_diff > TEST_ABS_TOLERANCE && max_rel_diff > TEST_REL_TOLERANCE)
{
printf("test failed (abs. tolerance = %e, rel. tolerance = %e)\n", TEST_ABS_TOLERANCE, TEST_REL_TOLERANCE);
result = -1;
}
}
free(A_dense);
free(B_dense);
free(C_dense);
free(A_float);
free(B_float);
free(C_float);
spamm_delete(&A);
spamm_delete(&B);
spamm_delete(&C);
return result;
}
|
raytracer.h | #pragma once
#include "resource.h"
#include <iostream>
#include <linalg.h>
#include <memory>
#include <omp.h>
#include <random>
using namespace linalg::aliases;
namespace cg::renderer
{
// A ray given by an origin (`position`) and a unit-length `direction`.
// The constructor normalizes whatever direction it is handed.
struct ray
{
    ray(float3 position, float3 direction)
        : position(position), direction(normalize(direction))
    {
    }
    float3 position;
    float3 direction;
};
// Result of tracing one ray.
struct payload
{
    // Ray parameter of the hit; intersection_shader() uses -1 as "no hit".
    float t;
    // Barycentric coordinates of the hit point inside the triangle.
    float3 bary;
    // Color carried back up the ray tree.
    cg::color color;
};
// Geometry plus per-vertex shading data of a single triangle.
template<typename VB>
struct triangle
{
    // Builds the triangle from three vertices of type VB (defined below).
    triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c);
    // Vertex positions.
    float3 a;
    float3 b;
    float3 c;
    // Precomputed edges (b - a and c - a) used by the intersection test.
    float3 ba;
    float3 ca;
    // Per-vertex normals.
    float3 na;
    float3 nb;
    float3 nc;
    // Material colors; the constructor reads them from vertex_a only.
    float3 ambient;
    float3 diffuse;
    float3 emissive;
};
// Copies positions and normals from all three vertices. Material colors
// (ambient/diffuse/emissive) are taken from vertex_a only, i.e. a triangle
// is assumed to carry a uniform material.
template<typename VB>
inline triangle<VB>::triangle(
        const VB& vertex_a, const VB& vertex_b, const VB& vertex_c)
{
    a = float3{vertex_a.x, vertex_a.y, vertex_a.z};
    b = float3{vertex_b.x, vertex_b.y, vertex_b.z};
    c = float3{vertex_c.x, vertex_c.y, vertex_c.z};
    // Edges are precomputed once so the per-ray intersection test is cheaper.
    ba = b - a;
    ca = c - a;
    na = float3{vertex_a.nx, vertex_a.ny, vertex_a.nz};
    nb = float3{vertex_b.nx, vertex_b.ny, vertex_b.nz};
    nc = float3{vertex_c.nx, vertex_c.ny, vertex_c.nz};
    ambient = {vertex_a.ambient_r,
               vertex_a.ambient_g,
               vertex_a.ambient_b};
    diffuse = {vertex_a.diffuse_r,
               vertex_a.diffuse_g,
               vertex_a.diffuse_b};
    emissive = {vertex_a.emissive_r,
                vertex_a.emissive_g,
                vertex_a.emissive_b};
}
// Axis-aligned bounding box over a list of triangles; serves as a per-shape
// broad-phase acceleration structure.
template<typename VB>
class aabb
{
public:
    // Adds a triangle and grows the box to enclose its vertices.
    // NOTE(review): the parameter is taken by value, copying the triangle
    // twice (argument + push_back); a const reference would avoid one copy —
    // changing it requires touching declaration and definition together.
    void add_triangle(const triangle<VB> triangle);
    // Read-only access to the stored triangles (narrow phase input).
    const std::vector<triangle<VB>>& get_triangles() const;
    // Slab test: true when `ray` intersects this box.
    bool aabb_test(const ray& ray) const;

protected:
    std::vector<triangle<VB>> triangles;
    // Bounds are only meaningful once at least one triangle has been added.
    float3 aabb_min;
    float3 aabb_max;
};
// Point light source.
struct light
{
    float3 position;
    float3 color;
};
// CPU ray tracer templated on the vertex type VB and render-target texel
// type RT (RT must provide a static from_float3(), see ray_generation()).
//
// Usage: set viewport/target/buffers, build_acceleration_structure(), assign
// the shader callbacks, then call ray_generation().
// NOTE(review): miss_shader is invoked unconditionally by trace_ray(), so it
// must be assigned before tracing; std::function is used but <functional> is
// not directly included here — confirm it arrives via another header.
template<typename VB, typename RT>
class raytracer
{
public:
    raytracer(){};
    ~raytracer(){};

    void set_render_target(std::shared_ptr<resource<RT>> in_render_target);
    void clear_render_target(const RT& in_clear_value);
    void set_viewport(size_t in_width, size_t in_height);
    void set_vertex_buffers(std::vector<std::shared_ptr<cg::resource<VB>>> in_vertex_buffers);
    void set_index_buffers(std::vector<std::shared_ptr<cg::resource<unsigned int>>> in_index_buffers);
    // Builds one aabb per shape from the vertex/index buffer pairs.
    void build_acceleration_structure();
    std::vector<aabb<VB>> acceleration_structures;

    // Shoots depth-limited rays through every pixel, accumulating
    // `accumulation_num` jittered frames into `history`.
    void ray_generation(float3 position, float3 direction, float3 right, float3 up, size_t depth, size_t accumulation_num);
    // Returns the payload of the closest hit (or miss_shader's payload).
    payload trace_ray(const ray& ray, size_t depth, float max_t = 1000.f, float min_t = 0.001f) const;
    // Ray/triangle intersection test (see definition below).
    payload intersection_shader(const triangle<VB>& triangle, const ray& ray) const;

    // Shader callbacks; trace_ray() consults them at the usual RT pipeline points.
    std::function<payload(const ray& ray)> miss_shader = nullptr;
    std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle, size_t depth)>
            closest_hit_shader = nullptr;
    std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> any_hit_shader =
            nullptr;

    // Halton-sequence sub-pixel jitter for frame `frame_id`.
    float2 get_jitter(int frame_id);

protected:
    std::shared_ptr<cg::resource<RT>> render_target;
    // Accumulation buffer; allocated by set_viewport().
    std::shared_ptr<cg::resource<float3>> history;
    std::vector<std::shared_ptr<cg::resource<unsigned int>>> index_buffers;
    std::vector<std::shared_ptr<cg::resource<VB>>> vertex_buffers;

    size_t width = 1920;
    size_t height = 1080;
};
// Sets the image the tracer resolves pixels into.
template<typename VB, typename RT>
inline void raytracer<VB, RT>::set_render_target(
        std::shared_ptr<resource<RT>> in_render_target)
{
    this->render_target = in_render_target;
}
// Fills every pixel of the render target with `in_clear_value` and zeroes
// the accumulation history (when a history buffer has been allocated).
template<typename VB, typename RT>
inline void raytracer<VB, RT>::clear_render_target(const RT& in_clear_value)
{
    const size_t pixel_count = width * height;
    for (size_t idx = 0; idx < pixel_count; ++idx)
    {
        render_target->item(idx) = in_clear_value;
        if (history)
            history->item(idx) = float3{0.f, 0.f, 0.f};
    }
}
// Stores the per-shape index buffers consumed by build_acceleration_structure().
template<typename VB, typename RT>
void raytracer<VB, RT>::set_index_buffers(std::vector<std::shared_ptr<cg::resource<unsigned int>>> in_index_buffers)
{
    this->index_buffers = in_index_buffers;
}
// Stores the per-shape vertex buffers consumed by build_acceleration_structure().
template<typename VB, typename RT>
inline void raytracer<VB, RT>::set_vertex_buffers(std::vector<std::shared_ptr<cg::resource<VB>>> in_vertex_buffers)
{
    this->vertex_buffers = in_vertex_buffers;
}
// Builds one axis-aligned bounding box (with its triangle list) per shape
// from the paired vertex/index buffers.
//
// Fixes over the previous version:
//  - the three index reads were `index_id++` inside one call expression,
//    whose evaluation order is unspecified in C++ — the triangle's vertices
//    could be assembled in any order; indices are now read sequentially;
//  - previously-built structures are discarded first, so calling this again
//    is idempotent instead of duplicating every shape;
//  - the loop stops at the last complete triple, so an index buffer whose
//    element count is not a multiple of 3 can no longer read out of range.
template<typename VB, typename RT>
inline void raytracer<VB, RT>::build_acceleration_structure()
{
    acceleration_structures.clear();
    acceleration_structures.reserve(index_buffers.size());
    for (size_t shape_id = 0; shape_id < index_buffers.size(); shape_id++) {
        auto& index_buffer = index_buffers[shape_id];
        auto& vertex_buffer = vertex_buffers[shape_id];
        aabb<VB> shape_aabb;
        size_t index_id = 0;
        while (index_id + 3 <= index_buffer->get_number_of_elements()) {
            // Read the three indices in a well-defined order.
            const auto i0 = index_buffer->item(index_id);
            const auto i1 = index_buffer->item(index_id + 1);
            const auto i2 = index_buffer->item(index_id + 2);
            index_id += 3;
            triangle<VB> tri(
                    vertex_buffer->item(i0),
                    vertex_buffer->item(i1),
                    vertex_buffer->item(i2));
            shape_aabb.add_triangle(tri);
        }
        acceleration_structures.push_back(shape_aabb);
    }
}
// Sets the output resolution and (re)allocates the accumulation buffer to
// match it.
template<typename VB, typename RT>
inline void raytracer<VB, RT>::set_viewport(size_t in_width, size_t in_height)
{
    this->width = in_width;
    this->height = in_height;
    history = std::make_shared<cg::resource<float3>>(in_width, in_height);
}
// Shoots depth-limited rays through every pixel, accumulating
// `accumulation_num` Halton-jittered frames into `history` and resolving
// into the render target after each pixel.
//
// @param position          camera origin
// @param direction         camera forward vector
// @param right, up         camera basis vectors of the image plane
// @param depth             recursion budget handed to trace_ray()
// @param accumulation_num  number of jittered frames to average
//
// Fix: the OpenMP pragma previously sat on the inner y loop, creating a
// parallel region per image row; it now parallelizes the outer pixel loop.
// Every (x, y) is written by exactly one iteration, so this is race-free
// and produces the same image. Loop counters are int (MSVC OpenMP requires
// signed), compared against explicitly cast bounds.
template<typename VB, typename RT>
inline void raytracer<VB, RT>::ray_generation(float3 position, float3 direction, float3 right, float3 up, size_t depth, size_t accumulation_num)
{
    float frame_weight = 1.f / static_cast<float>(accumulation_num);
    for (int frame_id = 0; frame_id < static_cast<int>(accumulation_num); frame_id++) {
        float2 jitter = get_jitter(frame_id);
#pragma omp parallel for
        for (int x = 0; x < static_cast<int>(width); x++) {
            for (int y = 0; y < static_cast<int>(height); y++) {
                // Jittered NDC coordinates in [-1, 1].
                float u = (2.f * x + jitter.x) / static_cast<float>(width - 1) - 1.f;
                float v = (2.f * y + jitter.y) / static_cast<float>(height - 1) - 1.f;
                u *= static_cast<float>(width) / static_cast<float>(height);// aspect ratio
                float3 ray_direction = direction + u * right - v * up;
                ray primary_ray(position, ray_direction);
                payload result = trace_ray(primary_ray, depth);
                auto& history_pixel = history->item(x, y);
                history_pixel += sqrt(float3{result.color.r, result.color.g, result.color.b} * frame_weight);
                render_target->item(x, y) = RT::from_float3(history_pixel);
            }
        }
    }
}
// Traverses all acceleration structures and returns the payload of the
// closest intersection along `ray`, recursing through closest_hit_shader up
// to `depth` bounces. Falls back to miss_shader when nothing is hit or the
// depth budget is exhausted (miss_shader must be assigned before tracing).
//
// @param ray   ray to trace
// @param depth remaining bounce budget; 0 means "miss shader only"
// @param max_t farthest accepted hit distance
// @param min_t nearest accepted hit distance (avoids self-intersection)
//
// Fix: the statement storing the hit triangle had been corrupted into the
// HTML entity rendering "closest_triangle = ▵"; the original assignment
// `closest_triangle = &triangle;` is restored — without it the code does
// not compile and the closest-hit shader would have no triangle to shade.
template<typename VB, typename RT>
inline payload raytracer<VB, RT>::trace_ray(
        const ray& ray, size_t depth, float max_t, float min_t) const
{
    if (depth == 0)
        return miss_shader(ray);
    depth--;
    payload closest_hit_payload = {};
    closest_hit_payload.t = max_t;
    const triangle<VB>* closest_triangle = nullptr;
    for (auto& aabb: acceleration_structures) {
        // Broad phase: skip shapes whose bounding box the ray misses.
        if (!aabb.aabb_test(ray))
            continue;
        for (auto& triangle: aabb.get_triangles()) {
            payload payload = intersection_shader(triangle, ray);
            if (payload.t > min_t && payload.t < closest_hit_payload.t) {
                closest_hit_payload = payload;
                closest_triangle = &triangle;
                // Any-hit shading accepts the first (not necessarily the
                // closest) intersection, e.g. for shadow rays.
                if (any_hit_shader)
                    return any_hit_shader(ray, payload, triangle);
            }
        }
    }
    if (closest_hit_payload.t < max_t) {
        if (closest_hit_shader)
            return closest_hit_shader(ray, closest_hit_payload, *closest_triangle, depth);
    }
    return miss_shader(ray);
}
// Möller–Trumbore ray/triangle intersection.
// Returns a payload with t = -1 when there is no hit; otherwise t is the
// ray parameter of the hit point and `bary` holds the barycentric weights
// (1-u-v, u, v) of vertices (a, b, c).
template<typename VB, typename RT>
inline payload raytracer<VB, RT>::intersection_shader(
        const triangle<VB>& triangle, const ray& ray) const
{
    payload payload{};
    payload.t = -1.f;
    float3 pvec = cross(ray.direction, triangle.ca);
    // Determinant of the 3x3 system; near-zero means the ray is parallel
    // to the triangle's plane.
    float det = dot(triangle.ba, pvec);
    if (det > -1e-8 && det < 1e-8)
        return payload;
    float inv_det = 1.f / det;
    float3 tvec = ray.position - triangle.a;
    // First barycentric coordinate; outside [0, 1] means no hit.
    float u = dot(tvec, pvec) * inv_det;
    if (u < 0.f || u > 1.f)
        return payload;
    float3 qvec = cross(tvec, triangle.ba);
    // Second barycentric coordinate; u + v must stay within the triangle.
    float v = dot(ray.direction, qvec) * inv_det;
    if (v < 0.f || u + v > 1.f)
        return payload;
    payload.t = dot(triangle.ca, qvec) * inv_det;
    payload.bary = float3{1.f - u - v, u, v};
    return payload;
}
// Halton low-discrepancy jitter for frame `frame_id`: base 2 drives x,
// base 3 drives y. The result is centered around zero.
template<typename VB, typename RT>
float2 raytracer<VB, RT>::get_jitter(int frame_id)
{
    // Radical inverse of `idx` in the given base (the Halton construction).
    auto radical_inverse = [](int idx, int base) {
        float value = 0.f;
        const float inv_base = 1.f / base;
        float fraction = inv_base;
        while (idx > 0) {
            value += (idx % base) * fraction;
            idx /= base;
            fraction *= inv_base;
        }
        return value;
    };
    float2 result{radical_inverse(frame_id + 1, 2),
                  radical_inverse(frame_id + 1, 3)};
    return result - 0.5f;
}
// Appends `triangle` to this box and grows the bounds to enclose all of
// its vertices.
template<typename VB>
inline void aabb<VB>::add_triangle(const triangle<VB> triangle)
{
    // The very first vertex seeds the bounds.
    if (triangles.empty())
        aabb_max = aabb_min = triangle.a;
    triangles.push_back(triangle);
    for (const float3& vertex: {triangle.a, triangle.b, triangle.c}) {
        aabb_max = max(aabb_max, vertex);
        aabb_min = min(aabb_min, vertex);
    }
}
// Read-only access to the triangles collected in this box.
template<typename VB>
inline const std::vector<triangle<VB>>& aabb<VB>::get_triangles() const
{
    return triangles;
}
// Slab test: does `ray` intersect this axis-aligned box?
//
// Division by a zero direction component yields ±inf, which the min/max
// comparisons handle (IEEE-754); the degenerate origin-exactly-on-slab case
// would produce NaN — assumed not to occur for normalized scene rays.
//
// Fix: the previous test accepted boxes lying entirely *behind* the ray
// origin (all slab exit parameters negative), forcing a useless
// narrow-phase pass over their triangles; the `minelem(tmax) >= 0` term
// rejects them. A box containing the origin still passes (tmax > 0 there).
template<typename VB>
inline bool aabb<VB>::aabb_test(const ray& ray) const
{
    float3 inv_ray_direction = float3(1.f) / ray.direction;
    float3 t0 = (aabb_max - ray.position) * inv_ray_direction;
    float3 t1 = (aabb_min - ray.position) * inv_ray_direction;
    float3 tmax = max(t0, t1);
    float3 tmin = min(t0, t1);
    return maxelem(tmin) <= minelem(tmax) && minelem(tmax) >= 0.f;
}
}// namespace cg::renderer |
sim5interpolation.c | //************************************************************************
// SIM5 library
// sim5interpolation.c - interpolation functions
//------------------------------------------------------------------------
// Author:
// Michal Bursa (bursa@astro.cas.cz)
// Astronomical Institute of the Czech Academy of Sciences
//************************************************************************
#ifndef CUDA
//! \file sim5interpolation.c
//! Numerical interpolation.
//!
//! Provides routines for interpolation of table data.
//! \cond SKIP
DEVICEFUNC INLINE
long sim5_interp_search(const double x_array[], double x, long index_lo, long index_hi)
// Bisection search for the interval of the ordered array x_array[] that
// brackets `x`, restricted to the initial bracket [index_lo, index_hi]
// (index_lo < index_hi is assumed).
// Returns an index i with index_lo <= i < index_hi such that the implicit
// interval [i, i+1] brackets x; out-of-grid values clamp to the first or
// last interval:
//   x <= x[0]           ->  index == 0
//   x[i] <= x < x[i+1]  ->  index == i
//   x >= x[N]           ->  index == N-1
{
    long lo = index_lo;
    long hi = index_hi;
    // invariant: the answer lies in [lo, hi); stop when the bracket is one wide
    while (hi - lo > 1) {
        long mid = (lo + hi)/2;
        if (x_array[mid] > x)
            hi = mid;
        else
            lo = mid;
    }
    return lo;
}
//! \endcond
//! \cond SKIP
DEVICEFUNC INLINE
long sim5_interp_search_accel(sim5interp* interp, double x)
// Accelerated bracket search: first checks whether `x` still falls into the
// interval cached from the previous lookup; only on a cache miss does it
// bisect the appropriate half of the grid.
{
    long cached = interp->last_index;
    if (x < interp->X[cached]) {
        // below the cached interval: bisect [0, cached]
        interp->last_index = sim5_interp_search(interp->X, x, 0, cached);
    } else if (x >= interp->X[cached + 1]) {
        // above the cached interval: bisect [cached, N-1]
        interp->last_index = sim5_interp_search(interp->X, x, cached, interp->N-1);
    }
    return interp->last_index;
}
//! \endcond
//! \cond SKIP
DEVICEFUNC
static void spline(double x[], double y[], int n, double yp1, double ypn, double y2[])
//! Calculates second derivatives of function y[]=f(x[]) for cubic spline interpolation.
//! - given arrays x[] and y[] containing a tabulated function y=f(x),
//!   and given values yp1 and ypn for the first derivative of the interpolating
//!   function at the first and last point, respectively
//! - the routine fills y2[0..n-1] with the second derivatives of the
//!   interpolating function at the tabulated points x[i]
//! - if yp1 and/or ypn are larger than 1e30, the routine is signaled
//!   to set the corresponding boundary condition for a natural spline, with zero
//!   second derivative on that boundary
//! (routine from Numerical Recipes in C)
{
    int i,k;
    double p, qn, sig, un, *u;
    // scratch array for the tridiagonal decomposition (n-1 elements)
    //MALLOC(u,float,n-1);
    u = (double*)malloc((n-1)*sizeof(double));
    //#ifndef CUDA
    if (u == NULL) exit(EXIT_FAILURE);
    //#else
    //asm("exit;");
    //#endif
    //fprintf(stderr,"ERR %s line %d: Memory allocation failure.\n", __FILE__, __LINE__);
    // lower boundary: natural spline (y2 = 0) or prescribed first derivative yp1
    if(yp1 > 0.99e30)
        y2[0] = u[0] = 0.0;
    else{
        y2[0] = -0.5;
        u[0] = (3.0/(x[1]-x[0]))*((y[1]-y[0])/(x[1]-x[0])-yp1);
    }
    // forward sweep (decomposition) of the tridiagonal algorithm;
    // y2 and u serve as temporary storage
    for(i = 1; i < n-1; i++){
        sig = (x[i] - x[i-1])/(x[i+1] - x[i-1]);
        p = sig*y2[i-1] + 2.0;
        y2[i] = (sig - 1.0)/p;
        u[i] = (y[i+1] - y[i])/(x[i+1] - x[i]) - (y[i] - y[i-1])/(x[i] - x[i-1]);
        u[i] = (6.0*u[i]/(x[i+1] - x[i-1]) - sig*u[i-1])/p;
    }
    // upper boundary: natural spline or prescribed first derivative ypn
    if(ypn > 0.99e30)
        qn = un = 0.0;
    else{
        qn = 0.5;
        un = (3.0/(x[n-1] - x[n-2]))*(ypn - (y[n-1] - y[n-2])/(x[n-1] - x[n-2]));
    }
    y2[n-1] = (un - qn*u[n-2])/(qn*y2[n-2] + 1.0);
    // backsubstitution of the tridiagonal algorithm
    for(k = n-2; k >= 0; k--){
        y2[k] = y2[k]*y2[k+1] + u[k];
    }
    free(u);
}
//! \endcond
//! \cond SKIP
DEVICEFUNC
static double splint(double xa[], double ya[], double y2a[], int n, double x)
//! Cubic spline interpolation.
//! - given the arrays xa[] and ya[] of dimension N, which tabulate a function,
//!   and the array y2a[], which is the output from the spline() routine,
//!   this routine returns a cubic-spline interpolated value y at point x
//! - xa[] must be an ordered array
//! (routine from Numerical Recipes in C)
{
    int klo,khi,k;
    double h,b,a;
    // Cache of the bracketing interval from the previous call. It is
    // thread-local under OpenMP (threadprivate) so concurrent callers do
    // not race on it; the cache persists across calls for the lifetime of
    // each thread.
    static int pklo=0, pkhi=1;
#pragma omp threadprivate(pklo,pkhi)
    // Assuming that calls to this function are made with closely-spaced,
    // steadily-increasing values of x, we first try using the same values of klo and khi
    // as were used in the previous invocation.
    // If that interval is no longer correct, a standard binary search looks for the correct interval.
    if(xa[pklo] <= x && xa[pkhi] > x){
        klo = pklo;
        khi = pkhi;
    }
    else{
        // cache miss: bisection search for the bracketing interval
        klo = 0;
        khi = n-1;
        while (khi-klo > 1){
            k = (khi + klo) >> 1;
            if(xa[k] > x) khi = k; else klo = k;
        }
        pklo = klo;
        pkhi = khi;
    }
    h = xa[khi] - xa[klo];
    // h == 0 cannot happen here: grid ordering is verified during
    // sim5interp initialization, so the check below stays disabled.
    //if (h == 0.0) {
    //    fprintf(stderr,"-E- %s line %d: Bad xa input to function splint()\n", __FILE__,__LINE__);
    //    exit(EXIT_FAILURE);
    //}
    a = (xa[khi] - x)/h;
    b = (x - xa[klo])/h;
    // Numerical Recipes cubic-spline evaluation formula.
    return a*ya[klo] + b*ya[khi] + ((a*a*a - a)*y2a[klo] + (b*b*b - b)*y2a[khi])*(h*h)/6.0;
}
//! \endcond
DEVICEFUNC
void sim5_interp_init(sim5interp* interp, double xa[], double ya[], long N, int data_model, int interp_type, int interp_options)
//! Interpolation initialization.
//! Initializes the interpolation object `interp` with the data (xa,ya) where xa and ya are arrays of size N.
//! @param interp interpolation object
//! @param xa array of x values
//!        (`xa` data array is always assumed to be strictly ordered, with increasing x values)
//! @param ya array of y values
//! @param N size of x/y arrays
//! @param data_model switch that determines how input data in `xa` and `ya` arrays should be handled:
//!        INTERP_DATA_REF=X/Y arrays are referenced, INTERP_DATA_COPY=X/Y arrays are copied,
//!        INTERP_DATA_BUILD=X/Y arrays are not passed, they are built by calls to sim5_interp_data_push();
//!        with INTERP_DATA_REF option (default), the interpolation object (interp) does not save the data
//!        arrays `xa` and `ya`, only saves pointers to them; with INTERP_DATA_COPY it makes an independent
//!        copy of those arrays, in which case the original arrays can be modified or freed after calling sim5_interp_init()
//! @param interp_type determines in which way data will be interpolated:
//!        INTERP_TYPE_LINLIN=linear interpolation in both X and Y,
//!        INTERP_TYPE_LINLOG=linear interpolation in X, logarithmic in Y,
//!        INTERP_TYPE_LOGLIN=logarithmic interpolation in X, linear in Y,
//!        INTERP_TYPE_LOGLOG=logarithmic interpolation in both X and Y,
//!        INTERP_TYPE_SPLINE=linear cubic spline interpolation
//! @param interp_options specifies additional options (a combination of options can be used):
//!        INTERP_OPT_ACCEL=interpolation will use acceleration (caching of index values),
//!        INTERP_OPT_CAN_EXTRAPOLATE=extrapolation is allowed when an `x` value for an out-of-grid point is requested
//!
//! @result Returns `interp` object to be used in actual interpolation.
{
    // spline mode evaluates strictly inside the tabulated grid, so the
    // extrapolation option is rejected up front
    if ((interp_type==INTERP_TYPE_SPLINE) && (interp_options & INTERP_OPT_CAN_EXTRAPOLATE)) {
        //#ifndef CUDA
        fprintf(stderr, "ERR (sim5_interp_init): spline interpolation cannot be used with extrapolation option\n");
        //#endif
        return;
    }

    interp->datamodel = data_model;
    interp->type = interp_type;
    interp->options = interp_options;
    // second derivatives (spline mode) are computed lazily on first eval
    interp->d2Y = NULL;

    // check of order: the X grid must be strictly increasing
    // (REF/COPY only; BUILD mode validates on each sim5_interp_data_push)
    if ((interp->datamodel==INTERP_DATA_REF) || (interp->datamodel==INTERP_DATA_COPY)) {
        long i;
        for (i=0; i<N-1; i++) {
            if (xa[i] >= xa[i+1]) {
                //#ifndef CUDA
                fprintf(stderr, "ERR (sim5_interp_init): unordered X grid (x[%ld]=%.4e, x[%ld]=%.4e, N=%ld, opt=%d)\n", i, xa[i], i+1, xa[i+1], N, interp_options);
                backtrace();
                //#endif
                interp->N = 0;
                interp->X = NULL;
                interp->Y = NULL;
                //#ifndef CUDA
                exit(-1);//return;
                //#else
                //asm("exit;");
                //#endif
            }
        }
    }

    switch (interp->datamodel) {
        case INTERP_DATA_REF:
            // assign the reference (caller keeps ownership of xa/ya)
            interp->N = N;
            interp->capa = 0;
            interp->X = xa;
            interp->Y = ya;
            interp->xmin = interp->X[0];
            interp->xmax = interp->X[N-1];
            // start the acceleration cache in the middle of the grid
            interp->last_index = (N-1)/2;
            break;

        case INTERP_DATA_COPY:
            // make an independent copy of the arrays
            interp->N = N;
            interp->capa = N;
            interp->X = (double*)malloc(N*sizeof(double));
            interp->Y = (double*)malloc(N*sizeof(double));
            memcpy (interp->X, xa, N*sizeof(double));
            memcpy (interp->Y, ya, N*sizeof(double));
            interp->xmin = interp->X[0];
            interp->xmax = interp->X[N-1];
            interp->last_index = (N-1)/2;
            break;

        case INTERP_DATA_BUILD:
            // start empty; initial capacity is N, or a default of 100
            interp->N = 0;
            interp->capa = N>0 ? N : 100;
            interp->X = (double*)calloc(interp->capa, sizeof(double));
            interp->Y = (double*)calloc(interp->capa, sizeof(double));
            interp->xmin = 0.0;
            interp->xmax = 0.0;
            interp->last_index = 0;
            break;

        default:
            //#ifndef CUDA
            fprintf(stderr, "ERR (sim5_interp_init): unimplemented data model (%d)\n", interp->datamodel);
            exit(-1);//return;
            //#else
            //asm("exit;");
            //#endif
    }
}
DEVICEFUNC
void sim5_interp_data_push(sim5interp* interp, double x, double y)
//! Pushes data into interpolation object.
//! Adds a data point [x,y] into the interpolation object. This function is for filling
//! an interpolation object that has been created with option INTERP_DATA_BUILD with interpolated data.
//! The data must come in ordered sequence (x[i] < x[i+1])
//!
//! @param interp interpolation object
//! @param x x-value of data point
//! @param y y-value of data point
{
    if (interp->datamodel != INTERP_DATA_BUILD) {
        //#ifndef CUDA
        fprintf(stderr, "ERR (sim5_interp_data_push): you can only push in data with INTERP_DATA_BUILD data model\n");
        //#endif
        return;
    }

    long i = interp->N;

    // enforce a strictly increasing X grid
    if ((i>0) && (interp->X[i-1] >= x)) {
        //#ifndef CUDA
        fprintf(stderr, "ERR (sim5_interp_data_push): unordered X grid (x[%ld]=%.4e, x[%ld]=%.4e)\n", i-1, interp->X[i-1], i, x);
        exit(-1);//return;
        //#else
        //asm("exit;");
        //#endif
    }

    interp->X[i] = x;
    interp->Y[i] = y;
    interp->N++;

    // grow the arrays when full so the next push always has room;
    // the realloc results are checked: on failure the originals would leak
    // and the next push would dereference NULL, so bail out like spline()
    if (interp->N >= interp->capa) {
        interp->capa *= 2;
        double* newX = (double*)realloc(interp->X, interp->capa*sizeof(double));
        double* newY = (double*)realloc(interp->Y, interp->capa*sizeof(double));
        if ((newX == NULL) || (newY == NULL)) {
            fprintf(stderr, "ERR (sim5_interp_data_push): memory allocation failure\n");
            exit(EXIT_FAILURE);
        }
        interp->X = newX;
        interp->Y = newY;
    }

    interp->xmin = interp->X[0];
    interp->xmax = interp->X[i];
    interp->last_index = i/2;
}
DEVICEFUNC
double sim5_interp_eval(sim5interp* interp, double x)
//! Interpolated data evaluation.
//! Evaluates the interpolated grid at the given point.
//!
//! @param interp interpolation object
//! @param x value for which to get interpolated value
//!
//! @result Interpolated value (NAN for an unimplemented interpolation type).
{
    double x_lo, x_hi;
    double y_lo, y_hi;
    long index;

    // spline interpolation uses a separate evaluation path
    if (interp->type == INTERP_TYPE_SPLINE) {
        // second derivatives are computed lazily on the first evaluation
        if (!interp->d2Y) {
            interp->d2Y = (double*) malloc(interp->N*sizeof(double));
            spline(interp->X, interp->Y, interp->N, 1e50, 1e50, interp->d2Y);
        }
        return splint(interp->X, interp->Y, interp->d2Y, interp->N, x);
    }

    if ((!(interp->options & INTERP_OPT_CAN_EXTRAPOLATE)) && ((x < interp->xmin) || (x > interp->xmax))) {
        //#ifndef CUDA
        fprintf(stderr, "WRN (sim5_interp_eval): unwarranted extrapolation (x=%.4e, xmin=%.4e, xmax=%.4e)\n", x, interp->xmin, interp->xmax);
        //#endif
    }

    if (interp->options & INTERP_OPT_ACCEL) {
        // index search with acceleration (cached last interval)
        index = sim5_interp_search_accel(interp, x);
    } else {
        // index search without acceleration
        index = sim5_interp_search(interp->X, x, 0, interp->N-1);
    }

    x_lo = interp->X[index];
    x_hi = interp->X[index + 1];
    y_lo = interp->Y[index];
    y_hi = interp->Y[index + 1];

    switch (interp->type) {
        case INTERP_TYPE_LINLIN:
            return y_lo + (x-x_lo)/(x_hi-x_lo) * (y_hi-y_lo);

        case INTERP_TYPE_LINLOG:
            // linear in X, logarithmic in Y; this type is documented in
            // sim5_interp_init() but previously fell through to the error
            // branch and returned NAN
            return exp(log(y_lo) + (x-x_lo)/(x_hi-x_lo) * (log(y_hi)-log(y_lo)));

        case INTERP_TYPE_LOGLOG:
            return exp(log(y_lo) + (log(x)-log(x_lo)) / (log(x_hi) - log(x_lo)) * (log(y_hi)-log(y_lo)));

        case INTERP_TYPE_LOGLIN:
            // equivalent to: y_lo + (log(x)-log(x_lo)) / (log(x_hi) - log(x_lo)) * (y_hi - y_lo)
            return y_lo + log(x/x_lo)/log(x_hi/x_lo) * (y_hi-y_lo);

        default:
            //#ifndef CUDA
            fprintf(stderr, "ERR (sim5_interp_eval): unimplemented interpolation type (%d)\n", interp->type);
            //#endif
            return NAN;
    }
}
/*
double sim5_interp_integral(sim5interp* interp, double a, double b)
// makes the evalutaion of interpolated grid at point x
{
int i, N = 500;
double result = 0.0;
for (i=0; i<N; i++) result += sim5_interp_eval(interp, a+(i+0.5)*(b-a)/(N));
return result*(b-a)/N;
}
*/
DEVICEFUNC
void sim5_interp_done(sim5interp* interp)
//! Interpolation finalization.
//! Frees the internals of the interpolation object `interp` (including
//! copied data, if any) and resets it to an empty state so that a second
//! finalization or a re-initialization is safe.
//!
//! @param interp interpolation object
{
    // X/Y are owned by the object only for the COPY and BUILD data models;
    // with INTERP_DATA_REF they still belong to the caller
    if ((interp->datamodel==INTERP_DATA_COPY) || (interp->datamodel==INTERP_DATA_BUILD)){
        free(interp->X);
        free(interp->Y);
    }
    if (interp->d2Y) free(interp->d2Y);
    // clear the pointer: previously d2Y was left dangling, so a second
    // sim5_interp_done() (e.g. via sim5_interp_free after a manual done)
    // would double-free it
    interp->d2Y = NULL;
    interp->N = 0;
    interp->capa = 0;
    interp->X = NULL;
    interp->Y = NULL;
}
DEVICEFUNC
sim5interp* sim5_interp_alloc()
//! Alloc interpolation object memory.
//! Allocates zero-initialized memory for an interpolation object on the heap.
//! Intended usage:
//!
//!     sim5interp* interp;
//!     interp = sim5_interp_alloc();
//!     sim5_interp_init(interp, ...);
//!     sim5_interp_free(interp);
//!
//! @result Interpolation object.
{
    sim5interp* interp = (sim5interp*)calloc(1, sizeof(*interp));
    return interp;
}
DEVICEFUNC
void sim5_interp_free(sim5interp* interp)
//! Free interpolation object memory.
//! Frees the interpolation object `interp` that had been previously allocated by
//! `sim5_interp_alloc()`. Passing NULL is a no-op (mirrors free() semantics);
//! previously a NULL argument crashed inside sim5_interp_done().
//!
//! @param interp interpolation object
{
    if (interp == NULL) return;
    sim5_interp_done(interp);
    free(interp);
}
//#define SIM5FILEIO_TESTING
#ifdef SIM5FILEIO_TESTING
int main() {
double X[5] = {1.,2.,3.,4.,5.};
double Y[5] = {2.,4.,6.,8.,10.};
sim5interp interp;
sim5_interp_init(&interp, X, Y, 5, INTERP_TYPE_LINLIN, INTERP_OPT_ALLOW_EXTRAPOLATION+INTERP_OPT_ACCEL);
double x;
for (x=0.; x<10.; x+=0.1) printf("%e %e\n", x, sim5_interp_eval(&interp, x));
sim5_interp_free(&interp);
return 0;
}
#endif
#endif //CUDA
|
ast-dump-openmp-begin-declare-variant-varying-return.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -DUSE_FLOAT | FileCheck %s --check-prefix=C_FLOAT
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++ -DUSE_FLOAT | FileCheck %s --check-prefix=CXX_FLOAT
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C_INT
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++ | FileCheck %s --check-prefix=CXX_INT
// expected-no-diagnostics
#ifdef __cplusplus
#define OVERLOADABLE
#else
#define OVERLOADABLE __attribute__((overloadable))
#endif
#ifdef USE_FLOAT
#define RETURN_TY float
#define BEFORE_BASE_RETURN_VALUE 0
#define BEFORE_VARIANT_RETURN_VALUE 1
#define AFTER__BASE_RETURN_VALUE 1
#define AFTER__VARIANT_RETURN_VALUE 0
#else
#define RETURN_TY int
#define BEFORE_BASE_RETURN_VALUE 1
#define BEFORE_VARIANT_RETURN_VALUE 0
#define AFTER__BASE_RETURN_VALUE 0
#define AFTER__VARIANT_RETURN_VALUE 1
#endif
OVERLOADABLE
RETURN_TY also_before(void) {
return BEFORE_BASE_RETURN_VALUE;
}
OVERLOADABLE
RETURN_TY also_before(int i) {
return BEFORE_BASE_RETURN_VALUE;
}
#pragma omp begin declare variant match(implementation = {extension(disable_implicit_base)})
OVERLOADABLE
int also_before(void) {
return BEFORE_VARIANT_RETURN_VALUE;
}
OVERLOADABLE
int also_before(int i) {
return BEFORE_VARIANT_RETURN_VALUE;
}
OVERLOADABLE
int also_after(double d) {
return AFTER__VARIANT_RETURN_VALUE;
}
OVERLOADABLE
int also_after(long l) {
return AFTER__VARIANT_RETURN_VALUE;
}
#pragma omp end declare variant
OVERLOADABLE
RETURN_TY also_after(double d) {
return AFTER__BASE_RETURN_VALUE;
}
OVERLOADABLE
RETURN_TY also_after(long l) {
return AFTER__BASE_RETURN_VALUE;
}
int main(void) {
// Should return 0.
return also_before() + also_before(1) + also_before(2.0f) + also_after(3.0) + also_after(4L);
}
// Make sure we see base calls in the FLOAT versions, that is no
// PseudoObjectExpr in those. In the INT versions we want PseudoObjectExpr (=
// variant calls) for the `*_before` functions but not the `*_after` ones
// (first 3 vs 2 last ones).
// C_FLOAT: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:30:1> line:28:11 used also_before 'float ({{.*}})'
// C_FLOAT-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:29, line:30:1>
// C_FLOAT-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:29:3, line:15:34>
// C_FLOAT-NEXT: | | `-ImplicitCastExpr [[ADDR_3:0x[a-z0-9]*]] <col:34> 'float' <IntegralToFloating>
// C_FLOAT-NEXT: | | `-IntegerLiteral [[ADDR_4:0x[a-z0-9]*]] <col:34> 'int' 0
// C_FLOAT-NEXT: | `-OverloadableAttr [[ADDR_5:0x[a-z0-9]*]] <line:10:37>
// C_FLOAT-NEXT: |-FunctionDecl [[ADDR_6:0x[a-z0-9]*]] <col:22, line:34:1> line:32:11 used also_before 'float (int)'
// C_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_7:0x[a-z0-9]*]] <col:23, col:27> col:27 i 'int'
// C_FLOAT-NEXT: | |-CompoundStmt [[ADDR_8:0x[a-z0-9]*]] <col:30, line:34:1>
// C_FLOAT-NEXT: | | `-ReturnStmt [[ADDR_9:0x[a-z0-9]*]] <line:33:3, line:15:34>
// C_FLOAT-NEXT: | | `-ImplicitCastExpr [[ADDR_10:0x[a-z0-9]*]] <col:34> 'float' <IntegralToFloating>
// C_FLOAT-NEXT: | | `-IntegerLiteral [[ADDR_11:0x[a-z0-9]*]] <col:34> 'int' 0
// C_FLOAT-NEXT: | `-OverloadableAttr [[ADDR_12:0x[a-z0-9]*]] <line:10:37>
// C_FLOAT-NEXT: |-FunctionDecl [[ADDR_13:0x[a-z0-9]*]] <col:22, line:40:1> line:10:22 also_before[implementation={extension(disable_implicit_base)}] 'int ({{.*}})'
// C_FLOAT-NEXT: | |-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <line:38:23, line:40:1>
// C_FLOAT-NEXT: | | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:39:3, line:16:37>
// C_FLOAT-NEXT: | | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:37> 'int' 1
// C_FLOAT-NEXT: | `-OverloadableAttr [[ADDR_17:0x[a-z0-9]*]] <line:10:37>
// C_FLOAT-NEXT: |-FunctionDecl [[ADDR_18:0x[a-z0-9]*]] <col:22, line:44:1> line:10:22 also_before[implementation={extension(disable_implicit_base)}] 'int (int)'
// C_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_19:0x[a-z0-9]*]] <line:42:17, col:21> col:21 i 'int'
// C_FLOAT-NEXT: | |-CompoundStmt [[ADDR_20:0x[a-z0-9]*]] <col:24, line:44:1>
// C_FLOAT-NEXT: | | `-ReturnStmt [[ADDR_21:0x[a-z0-9]*]] <line:43:3, line:16:37>
// C_FLOAT-NEXT: | | `-IntegerLiteral [[ADDR_22:0x[a-z0-9]*]] <col:37> 'int' 1
// C_FLOAT-NEXT: | `-OverloadableAttr [[ADDR_23:0x[a-z0-9]*]] <line:10:37>
// C_FLOAT-NEXT: |-FunctionDecl [[ADDR_24:0x[a-z0-9]*]] <col:22, line:49:1> line:10:22 also_after[implementation={extension(disable_implicit_base)}] 'int (double)'
// C_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_25:0x[a-z0-9]*]] <line:47:16, col:23> col:23 d 'double'
// C_FLOAT-NEXT: | |-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] <col:26, line:49:1>
// C_FLOAT-NEXT: | | `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:48:3, line:18:37>
// C_FLOAT-NEXT: | | `-IntegerLiteral [[ADDR_28:0x[a-z0-9]*]] <col:37> 'int' 0
// C_FLOAT-NEXT: | `-OverloadableAttr [[ADDR_29:0x[a-z0-9]*]] <line:10:37>
// C_FLOAT-NEXT: |-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <col:22, line:53:1> line:10:22 also_after[implementation={extension(disable_implicit_base)}] 'int (long)'
// C_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_31:0x[a-z0-9]*]] <line:51:16, col:21> col:21 l 'long'
// C_FLOAT-NEXT: | |-CompoundStmt [[ADDR_32:0x[a-z0-9]*]] <col:24, line:53:1>
// C_FLOAT-NEXT: | | `-ReturnStmt [[ADDR_33:0x[a-z0-9]*]] <line:52:3, line:18:37>
// C_FLOAT-NEXT: | | `-IntegerLiteral [[ADDR_34:0x[a-z0-9]*]] <col:37> 'int' 0
// C_FLOAT-NEXT: | `-OverloadableAttr [[ADDR_35:0x[a-z0-9]*]] <line:10:37>
// C_FLOAT-NEXT: |-FunctionDecl [[ADDR_36:0x[a-z0-9]*]] <col:22, line:59:1> line:57:11 used also_after 'float (double)'
// C_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_37:0x[a-z0-9]*]] <col:22, col:29> col:29 d 'double'
// C_FLOAT-NEXT: | |-CompoundStmt [[ADDR_38:0x[a-z0-9]*]] <col:32, line:59:1>
// C_FLOAT-NEXT: | | `-ReturnStmt [[ADDR_39:0x[a-z0-9]*]] <line:58:3, line:17:34>
// C_FLOAT-NEXT: | | `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <col:34> 'float' <IntegralToFloating>
// C_FLOAT-NEXT: | | `-IntegerLiteral [[ADDR_41:0x[a-z0-9]*]] <col:34> 'int' 1
// C_FLOAT-NEXT: | `-OverloadableAttr [[ADDR_42:0x[a-z0-9]*]] <line:10:37>
// C_FLOAT-NEXT: |-FunctionDecl [[ADDR_43:0x[a-z0-9]*]] <col:22, line:63:1> line:61:11 used also_after 'float (long)'
// C_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_44:0x[a-z0-9]*]] <col:22, col:27> col:27 l 'long'
// C_FLOAT-NEXT: | |-CompoundStmt [[ADDR_45:0x[a-z0-9]*]] <col:30, line:63:1>
// C_FLOAT-NEXT: | | `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:62:3, line:17:34>
// C_FLOAT-NEXT: | | `-ImplicitCastExpr [[ADDR_47:0x[a-z0-9]*]] <col:34> 'float' <IntegralToFloating>
// C_FLOAT-NEXT: | | `-IntegerLiteral [[ADDR_48:0x[a-z0-9]*]] <col:34> 'int' 1
// C_FLOAT-NEXT: | `-OverloadableAttr [[ADDR_49:0x[a-z0-9]*]] <line:10:37>
// C_FLOAT-NEXT: `-FunctionDecl [[ADDR_50:0x[a-z0-9]*]] <line:65:1, line:68:1> line:65:5 main 'int ({{.*}})'
// C_FLOAT-NEXT: `-CompoundStmt [[ADDR_51:0x[a-z0-9]*]] <col:16, line:68:1>
// C_FLOAT-NEXT: `-ReturnStmt [[ADDR_52:0x[a-z0-9]*]] <line:67:3, col:94>
// C_FLOAT-NEXT: `-ImplicitCastExpr [[ADDR_53:0x[a-z0-9]*]] <col:10, col:94> 'int' <FloatingToIntegral>
// C_FLOAT-NEXT: `-BinaryOperator [[ADDR_54:0x[a-z0-9]*]] <col:10, col:94> 'float' '+'
// C_FLOAT-NEXT: |-BinaryOperator [[ADDR_55:0x[a-z0-9]*]] <col:10, col:77> 'float' '+'
// C_FLOAT-NEXT: | |-BinaryOperator [[ADDR_56:0x[a-z0-9]*]] <col:10, col:59> 'float' '+'
// C_FLOAT-NEXT: | | |-BinaryOperator [[ADDR_57:0x[a-z0-9]*]] <col:10, col:39> 'float' '+'
// C_FLOAT-NEXT: | | | |-CallExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:22> 'float'
// C_FLOAT-NEXT: | | | | `-ImplicitCastExpr [[ADDR_59:0x[a-z0-9]*]] <col:10> 'float (*)({{.*}})' <FunctionToPointerDecay>
// C_FLOAT-NEXT: | | | | `-DeclRefExpr [[ADDR_60:0x[a-z0-9]*]] <col:10> 'float ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'float ({{.*}})'
// C_FLOAT-NEXT: | | | `-CallExpr [[ADDR_61:0x[a-z0-9]*]] <col:26, col:39> 'float'
// C_FLOAT-NEXT: | | | |-ImplicitCastExpr [[ADDR_62:0x[a-z0-9]*]] <col:26> 'float (*)(int)' <FunctionToPointerDecay>
// C_FLOAT-NEXT: | | | | `-DeclRefExpr [[ADDR_63:0x[a-z0-9]*]] <col:26> 'float (int)' {{.*}}Function [[ADDR_6]] 'also_before' 'float (int)'
// C_FLOAT-NEXT: | | | `-IntegerLiteral [[ADDR_64:0x[a-z0-9]*]] <col:38> 'int' 1
// C_FLOAT-NEXT: | | `-CallExpr [[ADDR_65:0x[a-z0-9]*]] <col:43, col:59> 'float'
// C_FLOAT-NEXT: | | |-ImplicitCastExpr [[ADDR_66:0x[a-z0-9]*]] <col:43> 'float (*)(int)' <FunctionToPointerDecay>
// C_FLOAT-NEXT: | | | `-DeclRefExpr [[ADDR_67:0x[a-z0-9]*]] <col:43> 'float (int)' {{.*}}Function [[ADDR_6]] 'also_before' 'float (int)'
// C_FLOAT-NEXT: | | `-ImplicitCastExpr [[ADDR_68:0x[a-z0-9]*]] <col:55> 'int' <FloatingToIntegral>
// C_FLOAT-NEXT: | | `-FloatingLiteral [[ADDR_69:0x[a-z0-9]*]] <col:55> 'float' 2.000000e+00
// C_FLOAT-NEXT: | `-CallExpr [[ADDR_70:0x[a-z0-9]*]] <col:63, col:77> 'float'
// C_FLOAT-NEXT: | |-ImplicitCastExpr [[ADDR_71:0x[a-z0-9]*]] <col:63> 'float (*)(double)' <FunctionToPointerDecay>
// C_FLOAT-NEXT: | | `-DeclRefExpr [[ADDR_72:0x[a-z0-9]*]] <col:63> 'float (double)' {{.*}}Function [[ADDR_36]] 'also_after' 'float (double)'
// C_FLOAT-NEXT: | `-FloatingLiteral [[ADDR_73:0x[a-z0-9]*]] <col:74> 'double' 3.000000e+00
// C_FLOAT-NEXT: `-CallExpr [[ADDR_74:0x[a-z0-9]*]] <col:81, col:94> 'float'
// C_FLOAT-NEXT: |-ImplicitCastExpr [[ADDR_75:0x[a-z0-9]*]] <col:81> 'float (*)(long)' <FunctionToPointerDecay>
// C_FLOAT-NEXT: | `-DeclRefExpr [[ADDR_76:0x[a-z0-9]*]] <col:81> 'float (long)' {{.*}}Function [[ADDR_43]] 'also_after' 'float (long)'
// C_FLOAT-NEXT: `-IntegerLiteral [[ADDR_77:0x[a-z0-9]*]] <col:92> 'long' 4
// CXX_FLOAT: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:30:1> line:28:11 used also_before 'float ({{.*}})'
// CXX_FLOAT-NEXT: | `-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:29, line:30:1>
// CXX_FLOAT-NEXT: | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:29:3, line:15:34>
// CXX_FLOAT-NEXT: | `-ImplicitCastExpr [[ADDR_3:0x[a-z0-9]*]] <col:34> 'float' <IntegralToFloating>
// CXX_FLOAT-NEXT: | `-IntegerLiteral [[ADDR_4:0x[a-z0-9]*]] <col:34> 'int' 0
// CXX_FLOAT-NEXT: |-FunctionDecl [[ADDR_5:0x[a-z0-9]*]] <line:14:19, line:34:1> line:32:11 used also_before 'float (int)'
// CXX_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_6:0x[a-z0-9]*]] <col:23, col:27> col:27 i 'int'
// CXX_FLOAT-NEXT: | `-CompoundStmt [[ADDR_7:0x[a-z0-9]*]] <col:30, line:34:1>
// CXX_FLOAT-NEXT: | `-ReturnStmt [[ADDR_8:0x[a-z0-9]*]] <line:33:3, line:15:34>
// CXX_FLOAT-NEXT: | `-ImplicitCastExpr [[ADDR_9:0x[a-z0-9]*]] <col:34> 'float' <IntegralToFloating>
// CXX_FLOAT-NEXT: | `-IntegerLiteral [[ADDR_10:0x[a-z0-9]*]] <col:34> 'int' 0
// CXX_FLOAT-NEXT: |-FunctionDecl [[ADDR_11:0x[a-z0-9]*]] <line:38:1, line:40:1> line:38:1 also_before[implementation={extension(disable_implicit_base)}] 'int ({{.*}})'
// CXX_FLOAT-NEXT: | `-CompoundStmt [[ADDR_12:0x[a-z0-9]*]] <col:23, line:40:1>
// CXX_FLOAT-NEXT: | `-ReturnStmt [[ADDR_13:0x[a-z0-9]*]] <line:39:3, line:16:37>
// CXX_FLOAT-NEXT: | `-IntegerLiteral [[ADDR_14:0x[a-z0-9]*]] <col:37> 'int' 1
// CXX_FLOAT-NEXT: |-FunctionDecl [[ADDR_15:0x[a-z0-9]*]] <line:42:1, line:44:1> line:42:1 also_before[implementation={extension(disable_implicit_base)}] 'int (int)'
// CXX_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_16:0x[a-z0-9]*]] <col:17, col:21> col:21 i 'int'
// CXX_FLOAT-NEXT: | `-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <col:24, line:44:1>
// CXX_FLOAT-NEXT: | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:43:3, line:16:37>
// CXX_FLOAT-NEXT: | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:37> 'int' 1
// CXX_FLOAT-NEXT: |-FunctionDecl [[ADDR_20:0x[a-z0-9]*]] <line:47:1, line:49:1> line:47:1 also_after[implementation={extension(disable_implicit_base)}] 'int (double)'
// CXX_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_21:0x[a-z0-9]*]] <col:16, col:23> col:23 d 'double'
// CXX_FLOAT-NEXT: | `-CompoundStmt [[ADDR_22:0x[a-z0-9]*]] <col:26, line:49:1>
// CXX_FLOAT-NEXT: | `-ReturnStmt [[ADDR_23:0x[a-z0-9]*]] <line:48:3, line:18:37>
// CXX_FLOAT-NEXT: | `-IntegerLiteral [[ADDR_24:0x[a-z0-9]*]] <col:37> 'int' 0
// CXX_FLOAT-NEXT: |-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:51:1, line:53:1> line:51:1 also_after[implementation={extension(disable_implicit_base)}] 'int (long)'
// CXX_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_26:0x[a-z0-9]*]] <col:16, col:21> col:21 l 'long'
// CXX_FLOAT-NEXT: | `-CompoundStmt [[ADDR_27:0x[a-z0-9]*]] <col:24, line:53:1>
// CXX_FLOAT-NEXT: | `-ReturnStmt [[ADDR_28:0x[a-z0-9]*]] <line:52:3, line:18:37>
// CXX_FLOAT-NEXT: | `-IntegerLiteral [[ADDR_29:0x[a-z0-9]*]] <col:37> 'int' 0
// CXX_FLOAT-NEXT: |-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <line:14:19, line:59:1> line:57:11 used also_after 'float (double)'
// CXX_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_31:0x[a-z0-9]*]] <col:22, col:29> col:29 d 'double'
// CXX_FLOAT-NEXT: | `-CompoundStmt [[ADDR_32:0x[a-z0-9]*]] <col:32, line:59:1>
// CXX_FLOAT-NEXT: | `-ReturnStmt [[ADDR_33:0x[a-z0-9]*]] <line:58:3, line:17:34>
// CXX_FLOAT-NEXT: | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:34> 'float' <IntegralToFloating>
// CXX_FLOAT-NEXT: | `-IntegerLiteral [[ADDR_35:0x[a-z0-9]*]] <col:34> 'int' 1
// CXX_FLOAT-NEXT: |-FunctionDecl [[ADDR_36:0x[a-z0-9]*]] <line:14:19, line:63:1> line:61:11 used also_after 'float (long)'
// CXX_FLOAT-NEXT: | |-ParmVarDecl [[ADDR_37:0x[a-z0-9]*]] <col:22, col:27> col:27 l 'long'
// CXX_FLOAT-NEXT: | `-CompoundStmt [[ADDR_38:0x[a-z0-9]*]] <col:30, line:63:1>
// CXX_FLOAT-NEXT: | `-ReturnStmt [[ADDR_39:0x[a-z0-9]*]] <line:62:3, line:17:34>
// CXX_FLOAT-NEXT: | `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <col:34> 'float' <IntegralToFloating>
// CXX_FLOAT-NEXT: | `-IntegerLiteral [[ADDR_41:0x[a-z0-9]*]] <col:34> 'int' 1
// CXX_FLOAT-NEXT: `-FunctionDecl [[ADDR_42:0x[a-z0-9]*]] <line:65:1, line:68:1> line:65:5 main 'int ({{.*}})'
// CXX_FLOAT-NEXT: `-CompoundStmt [[ADDR_43:0x[a-z0-9]*]] <col:16, line:68:1>
// CXX_FLOAT-NEXT: `-ReturnStmt [[ADDR_44:0x[a-z0-9]*]] <line:67:3, col:94>
// CXX_FLOAT-NEXT: `-ImplicitCastExpr [[ADDR_45:0x[a-z0-9]*]] <col:10, col:94> 'int' <FloatingToIntegral>
// CXX_FLOAT-NEXT: `-BinaryOperator [[ADDR_46:0x[a-z0-9]*]] <col:10, col:94> 'float' '+'
// CXX_FLOAT-NEXT: |-BinaryOperator [[ADDR_47:0x[a-z0-9]*]] <col:10, col:77> 'float' '+'
// CXX_FLOAT-NEXT: | |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <col:10, col:59> 'float' '+'
// CXX_FLOAT-NEXT: | | |-BinaryOperator [[ADDR_49:0x[a-z0-9]*]] <col:10, col:39> 'float' '+'
// CXX_FLOAT-NEXT: | | | |-CallExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:22> 'float'
// CXX_FLOAT-NEXT: | | | | `-ImplicitCastExpr [[ADDR_51:0x[a-z0-9]*]] <col:10> 'float (*)({{.*}})' <FunctionToPointerDecay>
// CXX_FLOAT-NEXT: | | | | `-DeclRefExpr [[ADDR_52:0x[a-z0-9]*]] <col:10> 'float ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'float ({{.*}})'
// CXX_FLOAT-NEXT: | | | `-CallExpr [[ADDR_53:0x[a-z0-9]*]] <col:26, col:39> 'float'
// CXX_FLOAT-NEXT: | | | |-ImplicitCastExpr [[ADDR_54:0x[a-z0-9]*]] <col:26> 'float (*)(int)' <FunctionToPointerDecay>
// CXX_FLOAT-NEXT: | | | | `-DeclRefExpr [[ADDR_55:0x[a-z0-9]*]] <col:26> 'float (int)' {{.*}}Function [[ADDR_5]] 'also_before' 'float (int)'
// CXX_FLOAT-NEXT: | | | `-IntegerLiteral [[ADDR_56:0x[a-z0-9]*]] <col:38> 'int' 1
// CXX_FLOAT-NEXT: | | `-CallExpr [[ADDR_57:0x[a-z0-9]*]] <col:43, col:59> 'float'
// CXX_FLOAT-NEXT: | | |-ImplicitCastExpr [[ADDR_58:0x[a-z0-9]*]] <col:43> 'float (*)(int)' <FunctionToPointerDecay>
// CXX_FLOAT-NEXT: | | | `-DeclRefExpr [[ADDR_59:0x[a-z0-9]*]] <col:43> 'float (int)' {{.*}}Function [[ADDR_5]] 'also_before' 'float (int)'
// CXX_FLOAT-NEXT: | | `-ImplicitCastExpr [[ADDR_60:0x[a-z0-9]*]] <col:55> 'int' <FloatingToIntegral>
// CXX_FLOAT-NEXT: | | `-FloatingLiteral [[ADDR_61:0x[a-z0-9]*]] <col:55> 'float' 2.000000e+00
// CXX_FLOAT-NEXT: | `-CallExpr [[ADDR_62:0x[a-z0-9]*]] <col:63, col:77> 'float'
// CXX_FLOAT-NEXT: | |-ImplicitCastExpr [[ADDR_63:0x[a-z0-9]*]] <col:63> 'float (*)(double)' <FunctionToPointerDecay>
// CXX_FLOAT-NEXT: | | `-DeclRefExpr [[ADDR_64:0x[a-z0-9]*]] <col:63> 'float (double)' {{.*}}Function [[ADDR_30]] 'also_after' 'float (double)'
// CXX_FLOAT-NEXT: | `-FloatingLiteral [[ADDR_65:0x[a-z0-9]*]] <col:74> 'double' 3.000000e+00
// CXX_FLOAT-NEXT: `-CallExpr [[ADDR_66:0x[a-z0-9]*]] <col:81, col:94> 'float'
// CXX_FLOAT-NEXT: |-ImplicitCastExpr [[ADDR_67:0x[a-z0-9]*]] <col:81> 'float (*)(long)' <FunctionToPointerDecay>
// CXX_FLOAT-NEXT: | `-DeclRefExpr [[ADDR_68:0x[a-z0-9]*]] <col:81> 'float (long)' {{.*}}Function [[ADDR_36]] 'also_after' 'float (long)'
// CXX_FLOAT-NEXT: `-IntegerLiteral [[ADDR_69:0x[a-z0-9]*]] <col:92> 'long' 4
// C_INT: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:30:1> line:28:11 used also_before 'int ({{.*}})'
// C_INT-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:29, line:30:1>
// C_INT-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:29:3, line:21:34>
// C_INT-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:34> 'int' 1
// C_INT-NEXT: | |-OverloadableAttr [[ADDR_4:0x[a-z0-9]*]] <line:10:37>
// C_INT-NEXT: | `-OMPDeclareVariantAttr [[ADDR_5:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(disable_implicit_base)}
// C_INT-NEXT: | `-DeclRefExpr [[ADDR_6:0x[a-z0-9]*]] <col:22> 'int ({{.*}})' Function [[ADDR_7:0x[a-z0-9]*]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int ({{.*}})'
// C_INT-NEXT: |-FunctionDecl [[ADDR_8:0x[a-z0-9]*]] <col:22, line:34:1> line:32:11 used also_before 'int (int)'
// C_INT-NEXT: | |-ParmVarDecl [[ADDR_9:0x[a-z0-9]*]] <col:23, col:27> col:27 i 'int'
// C_INT-NEXT: | |-CompoundStmt [[ADDR_10:0x[a-z0-9]*]] <col:30, line:34:1>
// C_INT-NEXT: | | `-ReturnStmt [[ADDR_11:0x[a-z0-9]*]] <line:33:3, line:21:34>
// C_INT-NEXT: | | `-IntegerLiteral [[ADDR_12:0x[a-z0-9]*]] <col:34> 'int' 1
// C_INT-NEXT: | |-OverloadableAttr [[ADDR_13:0x[a-z0-9]*]] <line:10:37>
// C_INT-NEXT: | `-OMPDeclareVariantAttr [[ADDR_14:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(disable_implicit_base)}
// C_INT-NEXT: | `-DeclRefExpr [[ADDR_15:0x[a-z0-9]*]] <col:22> 'int (int)' Function [[ADDR_16:0x[a-z0-9]*]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int (int)'
// C_INT-NEXT: |-FunctionDecl [[ADDR_7]] <col:22, line:40:1> line:10:22 also_before[implementation={extension(disable_implicit_base)}] 'int ({{.*}})'
// C_INT-NEXT: | |-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <line:38:23, line:40:1>
// C_INT-NEXT: | | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:39:3, line:22:37>
// C_INT-NEXT: | | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:37> 'int' 0
// C_INT-NEXT: | `-OverloadableAttr [[ADDR_20:0x[a-z0-9]*]] <line:10:37>
// C_INT-NEXT: |-FunctionDecl [[ADDR_16]] <col:22, line:44:1> line:10:22 also_before[implementation={extension(disable_implicit_base)}] 'int (int)'
// C_INT-NEXT: | |-ParmVarDecl [[ADDR_21:0x[a-z0-9]*]] <line:42:17, col:21> col:21 i 'int'
// C_INT-NEXT: | |-CompoundStmt [[ADDR_22:0x[a-z0-9]*]] <col:24, line:44:1>
// C_INT-NEXT: | | `-ReturnStmt [[ADDR_23:0x[a-z0-9]*]] <line:43:3, line:22:37>
// C_INT-NEXT: | | `-IntegerLiteral [[ADDR_24:0x[a-z0-9]*]] <col:37> 'int' 0
// C_INT-NEXT: | `-OverloadableAttr [[ADDR_25:0x[a-z0-9]*]] <line:10:37>
// C_INT-NEXT: |-FunctionDecl [[ADDR_26:0x[a-z0-9]*]] <col:22, line:49:1> line:10:22 also_after[implementation={extension(disable_implicit_base)}] 'int (double)'
// C_INT-NEXT: | |-ParmVarDecl [[ADDR_27:0x[a-z0-9]*]] <line:47:16, col:23> col:23 d 'double'
// C_INT-NEXT: | |-CompoundStmt [[ADDR_28:0x[a-z0-9]*]] <col:26, line:49:1>
// C_INT-NEXT: | | `-ReturnStmt [[ADDR_29:0x[a-z0-9]*]] <line:48:3, line:24:37>
// C_INT-NEXT: | | `-IntegerLiteral [[ADDR_30:0x[a-z0-9]*]] <col:37> 'int' 1
// C_INT-NEXT: | `-OverloadableAttr [[ADDR_31:0x[a-z0-9]*]] <line:10:37>
// C_INT-NEXT: |-FunctionDecl [[ADDR_32:0x[a-z0-9]*]] <col:22, line:53:1> line:10:22 also_after[implementation={extension(disable_implicit_base)}] 'int (long)'
// C_INT-NEXT: | |-ParmVarDecl [[ADDR_33:0x[a-z0-9]*]] <line:51:16, col:21> col:21 l 'long'
// C_INT-NEXT: | |-CompoundStmt [[ADDR_34:0x[a-z0-9]*]] <col:24, line:53:1>
// C_INT-NEXT: | | `-ReturnStmt [[ADDR_35:0x[a-z0-9]*]] <line:52:3, line:24:37>
// C_INT-NEXT: | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:37> 'int' 1
// C_INT-NEXT: | `-OverloadableAttr [[ADDR_37:0x[a-z0-9]*]] <line:10:37>
// C_INT-NEXT: |-FunctionDecl [[ADDR_38:0x[a-z0-9]*]] <col:22, line:59:1> line:57:11 used also_after 'int (double)'
// C_INT-NEXT: | |-ParmVarDecl [[ADDR_39:0x[a-z0-9]*]] <col:22, col:29> col:29 d 'double'
// C_INT-NEXT: | |-CompoundStmt [[ADDR_40:0x[a-z0-9]*]] <col:32, line:59:1>
// C_INT-NEXT: | | `-ReturnStmt [[ADDR_41:0x[a-z0-9]*]] <line:58:3, line:23:34>
// C_INT-NEXT: | | `-IntegerLiteral [[ADDR_42:0x[a-z0-9]*]] <col:34> 'int' 0
// C_INT-NEXT: | `-OverloadableAttr [[ADDR_43:0x[a-z0-9]*]] <line:10:37>
// C_INT-NEXT: |-FunctionDecl [[ADDR_44:0x[a-z0-9]*]] <col:22, line:63:1> line:61:11 used also_after 'int (long)'
// C_INT-NEXT: | |-ParmVarDecl [[ADDR_45:0x[a-z0-9]*]] <col:22, col:27> col:27 l 'long'
// C_INT-NEXT: | |-CompoundStmt [[ADDR_46:0x[a-z0-9]*]] <col:30, line:63:1>
// C_INT-NEXT: | | `-ReturnStmt [[ADDR_47:0x[a-z0-9]*]] <line:62:3, line:23:34>
// C_INT-NEXT: | | `-IntegerLiteral [[ADDR_48:0x[a-z0-9]*]] <col:34> 'int' 0
// C_INT-NEXT: | `-OverloadableAttr [[ADDR_49:0x[a-z0-9]*]] <line:10:37>
// C_INT-NEXT: `-FunctionDecl [[ADDR_50:0x[a-z0-9]*]] <line:65:1, line:68:1> line:65:5 main 'int ({{.*}})'
// C_INT-NEXT: `-CompoundStmt [[ADDR_51:0x[a-z0-9]*]] <col:16, line:68:1>
// C_INT-NEXT: `-ReturnStmt [[ADDR_52:0x[a-z0-9]*]] <line:67:3, col:94>
// C_INT-NEXT: `-BinaryOperator [[ADDR_53:0x[a-z0-9]*]] <col:10, col:94> 'int' '+'
// C_INT-NEXT: |-BinaryOperator [[ADDR_54:0x[a-z0-9]*]] <col:10, col:77> 'int' '+'
// C_INT-NEXT: | |-BinaryOperator [[ADDR_55:0x[a-z0-9]*]] <col:10, col:59> 'int' '+'
// C_INT-NEXT: | | |-BinaryOperator [[ADDR_56:0x[a-z0-9]*]] <col:10, col:39> 'int' '+'
// C_INT-NEXT: | | | |-PseudoObjectExpr [[ADDR_57:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C_INT-NEXT: | | | | |-CallExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C_INT-NEXT: | | | | | `-ImplicitCastExpr [[ADDR_59:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C_INT-NEXT: | | | | | `-DeclRefExpr [[ADDR_60:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// C_INT-NEXT: | | | | `-CallExpr [[ADDR_61:0x[a-z0-9]*]] <line:10:22, line:67:22> 'int'
// C_INT-NEXT: | | | | `-ImplicitCastExpr [[ADDR_62:0x[a-z0-9]*]] <line:10:22> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C_INT-NEXT: | | | | `-DeclRefExpr [[ADDR_6]] <col:22> 'int ({{.*}})' Function [[ADDR_7]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int ({{.*}})'
// C_INT-NEXT: | | | `-PseudoObjectExpr [[ADDR_63:0x[a-z0-9]*]] <line:67:26, col:39> 'int'
// C_INT-NEXT: | | | |-CallExpr [[ADDR_64:0x[a-z0-9]*]] <col:26, col:39> 'int'
// C_INT-NEXT: | | | | |-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:26> 'int (*)(int)' <FunctionToPointerDecay>
// C_INT-NEXT: | | | | | `-DeclRefExpr [[ADDR_66:0x[a-z0-9]*]] <col:26> 'int (int)' {{.*}}Function [[ADDR_8]] 'also_before' 'int (int)'
// C_INT-NEXT: | | | | `-IntegerLiteral [[ADDR_67:0x[a-z0-9]*]] <col:38> 'int' 1
// C_INT-NEXT: | | | `-CallExpr [[ADDR_68:0x[a-z0-9]*]] <line:10:22, line:67:39> 'int'
// C_INT-NEXT: | | | |-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <line:10:22> 'int (*)(int)' <FunctionToPointerDecay>
// C_INT-NEXT: | | | | `-DeclRefExpr [[ADDR_15]] <col:22> 'int (int)' Function [[ADDR_16]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int (int)'
// C_INT-NEXT: | | | `-IntegerLiteral [[ADDR_67]] <line:67:38> 'int' 1
// C_INT-NEXT: | | `-PseudoObjectExpr [[ADDR_70:0x[a-z0-9]*]] <col:43, col:59> 'int'
// C_INT-NEXT: | | |-CallExpr [[ADDR_71:0x[a-z0-9]*]] <col:43, col:59> 'int'
// C_INT-NEXT: | | | |-ImplicitCastExpr [[ADDR_72:0x[a-z0-9]*]] <col:43> 'int (*)(int)' <FunctionToPointerDecay>
// C_INT-NEXT: | | | | `-DeclRefExpr [[ADDR_73:0x[a-z0-9]*]] <col:43> 'int (int)' {{.*}}Function [[ADDR_8]] 'also_before' 'int (int)'
// C_INT-NEXT: | | | `-ImplicitCastExpr [[ADDR_74:0x[a-z0-9]*]] <col:55> 'int' <FloatingToIntegral>
// C_INT-NEXT: | | | `-FloatingLiteral [[ADDR_75:0x[a-z0-9]*]] <col:55> 'float' 2.000000e+00
// C_INT-NEXT: | | `-CallExpr [[ADDR_76:0x[a-z0-9]*]] <line:10:22, line:67:59> 'int'
// C_INT-NEXT: | | |-ImplicitCastExpr [[ADDR_77:0x[a-z0-9]*]] <line:10:22> 'int (*)(int)' <FunctionToPointerDecay>
// C_INT-NEXT: | | | `-DeclRefExpr [[ADDR_15]] <col:22> 'int (int)' Function [[ADDR_16]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int (int)'
// C_INT-NEXT: | | `-ImplicitCastExpr [[ADDR_78:0x[a-z0-9]*]] <line:67:55> 'int' <FloatingToIntegral>
// C_INT-NEXT: | | `-FloatingLiteral [[ADDR_75]] <col:55> 'float' 2.000000e+00
// C_INT-NEXT: | `-CallExpr [[ADDR_79:0x[a-z0-9]*]] <col:63, col:77> 'int'
// C_INT-NEXT: | |-ImplicitCastExpr [[ADDR_80:0x[a-z0-9]*]] <col:63> 'int (*)(double)' <FunctionToPointerDecay>
// C_INT-NEXT: | | `-DeclRefExpr [[ADDR_81:0x[a-z0-9]*]] <col:63> 'int (double)' {{.*}}Function [[ADDR_38]] 'also_after' 'int (double)'
// C_INT-NEXT: | `-FloatingLiteral [[ADDR_82:0x[a-z0-9]*]] <col:74> 'double' 3.000000e+00
// C_INT-NEXT: `-CallExpr [[ADDR_83:0x[a-z0-9]*]] <col:81, col:94> 'int'
// C_INT-NEXT: |-ImplicitCastExpr [[ADDR_84:0x[a-z0-9]*]] <col:81> 'int (*)(long)' <FunctionToPointerDecay>
// C_INT-NEXT: | `-DeclRefExpr [[ADDR_85:0x[a-z0-9]*]] <col:81> 'int (long)' {{.*}}Function [[ADDR_44]] 'also_after' 'int (long)'
// C_INT-NEXT: `-IntegerLiteral [[ADDR_86:0x[a-z0-9]*]] <col:92> 'long' 4
// CXX_INT: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:30:1> line:28:11 used also_before 'int ({{.*}})'
// CXX_INT-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:29, line:30:1>
// CXX_INT-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:29:3, line:21:34>
// CXX_INT-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:34> 'int' 1
// CXX_INT-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(disable_implicit_base)}
// CXX_INT-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:38:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int ({{.*}})'
// CXX_INT-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:20:19, line:34:1> line:32:11 used also_before 'int (int)'
// CXX_INT-NEXT: | |-ParmVarDecl [[ADDR_8:0x[a-z0-9]*]] <col:23, col:27> col:27 i 'int'
// CXX_INT-NEXT: | |-CompoundStmt [[ADDR_9:0x[a-z0-9]*]] <col:30, line:34:1>
// CXX_INT-NEXT: | | `-ReturnStmt [[ADDR_10:0x[a-z0-9]*]] <line:33:3, line:21:34>
// CXX_INT-NEXT: | | `-IntegerLiteral [[ADDR_11:0x[a-z0-9]*]] <col:34> 'int' 1
// CXX_INT-NEXT: | `-OMPDeclareVariantAttr [[ADDR_12:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(disable_implicit_base)}
// CXX_INT-NEXT: | `-DeclRefExpr [[ADDR_13:0x[a-z0-9]*]] <line:42:1> 'int (int)' Function [[ADDR_14:0x[a-z0-9]*]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int (int)'
// CXX_INT-NEXT: |-FunctionDecl [[ADDR_6]] <line:38:1, line:40:1> line:38:1 also_before[implementation={extension(disable_implicit_base)}] 'int ({{.*}})'
// CXX_INT-NEXT: | `-CompoundStmt [[ADDR_15:0x[a-z0-9]*]] <col:23, line:40:1>
// CXX_INT-NEXT: | `-ReturnStmt [[ADDR_16:0x[a-z0-9]*]] <line:39:3, line:22:37>
// CXX_INT-NEXT: | `-IntegerLiteral [[ADDR_17:0x[a-z0-9]*]] <col:37> 'int' 0
// CXX_INT-NEXT: |-FunctionDecl [[ADDR_14]] <line:42:1, line:44:1> line:42:1 also_before[implementation={extension(disable_implicit_base)}] 'int (int)'
// CXX_INT-NEXT: | |-ParmVarDecl [[ADDR_18:0x[a-z0-9]*]] <col:17, col:21> col:21 i 'int'
// CXX_INT-NEXT: | `-CompoundStmt [[ADDR_19:0x[a-z0-9]*]] <col:24, line:44:1>
// CXX_INT-NEXT: | `-ReturnStmt [[ADDR_20:0x[a-z0-9]*]] <line:43:3, line:22:37>
// CXX_INT-NEXT: | `-IntegerLiteral [[ADDR_21:0x[a-z0-9]*]] <col:37> 'int' 0
// CXX_INT-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:47:1, line:49:1> line:47:1 also_after[implementation={extension(disable_implicit_base)}] 'int (double)'
// CXX_INT-NEXT: | |-ParmVarDecl [[ADDR_23:0x[a-z0-9]*]] <col:16, col:23> col:23 d 'double'
// CXX_INT-NEXT: | `-CompoundStmt [[ADDR_24:0x[a-z0-9]*]] <col:26, line:49:1>
// CXX_INT-NEXT: | `-ReturnStmt [[ADDR_25:0x[a-z0-9]*]] <line:48:3, line:24:37>
// CXX_INT-NEXT: | `-IntegerLiteral [[ADDR_26:0x[a-z0-9]*]] <col:37> 'int' 1
// CXX_INT-NEXT: |-FunctionDecl [[ADDR_27:0x[a-z0-9]*]] <line:51:1, line:53:1> line:51:1 also_after[implementation={extension(disable_implicit_base)}] 'int (long)'
// CXX_INT-NEXT: | |-ParmVarDecl [[ADDR_28:0x[a-z0-9]*]] <col:16, col:21> col:21 l 'long'
// CXX_INT-NEXT: | `-CompoundStmt [[ADDR_29:0x[a-z0-9]*]] <col:24, line:53:1>
// CXX_INT-NEXT: | `-ReturnStmt [[ADDR_30:0x[a-z0-9]*]] <line:52:3, line:24:37>
// CXX_INT-NEXT: | `-IntegerLiteral [[ADDR_31:0x[a-z0-9]*]] <col:37> 'int' 1
// CXX_INT-NEXT: |-FunctionDecl [[ADDR_32:0x[a-z0-9]*]] <line:20:19, line:59:1> line:57:11 used also_after 'int (double)'
// CXX_INT-NEXT: | |-ParmVarDecl [[ADDR_33:0x[a-z0-9]*]] <col:22, col:29> col:29 d 'double'
// CXX_INT-NEXT: | `-CompoundStmt [[ADDR_34:0x[a-z0-9]*]] <col:32, line:59:1>
// CXX_INT-NEXT: | `-ReturnStmt [[ADDR_35:0x[a-z0-9]*]] <line:58:3, line:23:34>
// CXX_INT-NEXT: | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:34> 'int' 0
// CXX_INT-NEXT: |-FunctionDecl [[ADDR_37:0x[a-z0-9]*]] <line:20:19, line:63:1> line:61:11 used also_after 'int (long)'
// CXX_INT-NEXT: | |-ParmVarDecl [[ADDR_38:0x[a-z0-9]*]] <col:22, col:27> col:27 l 'long'
// CXX_INT-NEXT: | `-CompoundStmt [[ADDR_39:0x[a-z0-9]*]] <col:30, line:63:1>
// CXX_INT-NEXT: | `-ReturnStmt [[ADDR_40:0x[a-z0-9]*]] <line:62:3, line:23:34>
// CXX_INT-NEXT: | `-IntegerLiteral [[ADDR_41:0x[a-z0-9]*]] <col:34> 'int' 0
// CXX_INT-NEXT: `-FunctionDecl [[ADDR_42:0x[a-z0-9]*]] <line:65:1, line:68:1> line:65:5 main 'int ({{.*}})'
// CXX_INT-NEXT: `-CompoundStmt [[ADDR_43:0x[a-z0-9]*]] <col:16, line:68:1>
// CXX_INT-NEXT: `-ReturnStmt [[ADDR_44:0x[a-z0-9]*]] <line:67:3, col:94>
// CXX_INT-NEXT: `-BinaryOperator [[ADDR_45:0x[a-z0-9]*]] <col:10, col:94> 'int' '+'
// CXX_INT-NEXT: |-BinaryOperator [[ADDR_46:0x[a-z0-9]*]] <col:10, col:77> 'int' '+'
// CXX_INT-NEXT: | |-BinaryOperator [[ADDR_47:0x[a-z0-9]*]] <col:10, col:59> 'int' '+'
// CXX_INT-NEXT: | | |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <col:10, col:39> 'int' '+'
// CXX_INT-NEXT: | | | |-PseudoObjectExpr [[ADDR_49:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX_INT-NEXT: | | | | |-CallExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX_INT-NEXT: | | | | | `-ImplicitCastExpr [[ADDR_51:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX_INT-NEXT: | | | | | `-DeclRefExpr [[ADDR_52:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CXX_INT-NEXT: | | | | `-CallExpr [[ADDR_53:0x[a-z0-9]*]] <line:38:1, line:67:22> 'int'
// CXX_INT-NEXT: | | | | `-ImplicitCastExpr [[ADDR_54:0x[a-z0-9]*]] <line:38:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX_INT-NEXT: | | | | `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int ({{.*}})'
// CXX_INT-NEXT: | | | `-PseudoObjectExpr [[ADDR_55:0x[a-z0-9]*]] <line:67:26, col:39> 'int'
// CXX_INT-NEXT: | | | |-CallExpr [[ADDR_56:0x[a-z0-9]*]] <col:26, col:39> 'int'
// CXX_INT-NEXT: | | | | |-ImplicitCastExpr [[ADDR_57:0x[a-z0-9]*]] <col:26> 'int (*)(int)' <FunctionToPointerDecay>
// CXX_INT-NEXT: | | | | | `-DeclRefExpr [[ADDR_58:0x[a-z0-9]*]] <col:26> 'int (int)' {{.*}}Function [[ADDR_7]] 'also_before' 'int (int)'
// CXX_INT-NEXT: | | | | `-IntegerLiteral [[ADDR_59:0x[a-z0-9]*]] <col:38> 'int' 1
// CXX_INT-NEXT: | | | `-CallExpr [[ADDR_60:0x[a-z0-9]*]] <line:42:1, line:67:39> 'int'
// CXX_INT-NEXT: | | | |-ImplicitCastExpr [[ADDR_61:0x[a-z0-9]*]] <line:42:1> 'int (*)(int)' <FunctionToPointerDecay>
// CXX_INT-NEXT: | | | | `-DeclRefExpr [[ADDR_13]] <col:1> 'int (int)' Function [[ADDR_14]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int (int)'
// CXX_INT-NEXT: | | | `-IntegerLiteral [[ADDR_59]] <line:67:38> 'int' 1
// CXX_INT-NEXT: | | `-PseudoObjectExpr [[ADDR_62:0x[a-z0-9]*]] <col:43, col:59> 'int'
// CXX_INT-NEXT: | | |-CallExpr [[ADDR_63:0x[a-z0-9]*]] <col:43, col:59> 'int'
// CXX_INT-NEXT: | | | |-ImplicitCastExpr [[ADDR_64:0x[a-z0-9]*]] <col:43> 'int (*)(int)' <FunctionToPointerDecay>
// CXX_INT-NEXT: | | | | `-DeclRefExpr [[ADDR_65:0x[a-z0-9]*]] <col:43> 'int (int)' {{.*}}Function [[ADDR_7]] 'also_before' 'int (int)'
// CXX_INT-NEXT: | | | `-ImplicitCastExpr [[ADDR_66:0x[a-z0-9]*]] <col:55> 'int' <FloatingToIntegral>
// CXX_INT-NEXT: | | | `-FloatingLiteral [[ADDR_67:0x[a-z0-9]*]] <col:55> 'float' 2.000000e+00
// CXX_INT-NEXT: | | `-CallExpr [[ADDR_68:0x[a-z0-9]*]] <line:42:1, line:67:59> 'int'
// CXX_INT-NEXT: | | |-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <line:42:1> 'int (*)(int)' <FunctionToPointerDecay>
// CXX_INT-NEXT: | | | `-DeclRefExpr [[ADDR_13]] <col:1> 'int (int)' Function [[ADDR_14]] 'also_before[implementation={extension(disable_implicit_base)}]' 'int (int)'
// CXX_INT-NEXT: | | `-ImplicitCastExpr [[ADDR_70:0x[a-z0-9]*]] <line:67:55> 'int' <FloatingToIntegral>
// CXX_INT-NEXT: | | `-FloatingLiteral [[ADDR_67]] <col:55> 'float' 2.000000e+00
// CXX_INT-NEXT: | `-CallExpr [[ADDR_71:0x[a-z0-9]*]] <col:63, col:77> 'int'
// CXX_INT-NEXT: | |-ImplicitCastExpr [[ADDR_72:0x[a-z0-9]*]] <col:63> 'int (*)(double)' <FunctionToPointerDecay>
// CXX_INT-NEXT: | | `-DeclRefExpr [[ADDR_73:0x[a-z0-9]*]] <col:63> 'int (double)' {{.*}}Function [[ADDR_32]] 'also_after' 'int (double)'
// CXX_INT-NEXT: | `-FloatingLiteral [[ADDR_74:0x[a-z0-9]*]] <col:74> 'double' 3.000000e+00
// CXX_INT-NEXT: `-CallExpr [[ADDR_75:0x[a-z0-9]*]] <col:81, col:94> 'int'
// CXX_INT-NEXT: |-ImplicitCastExpr [[ADDR_76:0x[a-z0-9]*]] <col:81> 'int (*)(long)' <FunctionToPointerDecay>
// CXX_INT-NEXT: | `-DeclRefExpr [[ADDR_77:0x[a-z0-9]*]] <col:81> 'int (long)' {{.*}}Function [[ADDR_37]] 'also_after' 'int (long)'
// CXX_INT-NEXT: `-IntegerLiteral [[ADDR_78:0x[a-z0-9]*]] <col:92> 'long' 4
|
known_hosts_fmt_plug.c | /* Quick-and-dirty cracker for ~/.ssh/known_hosts hashes (HashKnownHosts yes).
*
* Based on http://blog.tremily.us/posts/known_hosts/
*
* This software is Copyright (c) 2014, Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Significant speedup Dec 2014, JimF. OMPSCALE was way off, and:
* NOTE Appears that salt and password are reversed?? With this info, salt was
* redone, to compute the first half of the HMAC, and double the speed.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_known_hosts;
#elif FMT_REGISTERS_H
john_register_one(&fmt_known_hosts);
#else
#include "sha.h"
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "known_hosts"
#define FORMAT_TAG "$known_hosts$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define FORMAT_NAME "HashKnownHosts HMAC-SHA1"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 20
#define BINARY_ENCODED_SIZE 28
#define PAD_SIZE 64
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: hashed ~/.ssh/known_hosts lines (|1|base64(salt)|base64(HMAC-SHA1))
 * prefixed with the "$known_hosts$" tag; the plaintext is the host address. */
static struct fmt_tests known_hosts_tests[] = {
{"$known_hosts$|1|yivSFSAv9mhGu/GPc14KpaPMSjE=|I9L3FH6RGefWIFb0Po74BVN3Fto=", "213.100.98.219"},
{"$known_hosts$|1|pgjIzNM77FYsBHLfKvvG9aWpKAA=|XbHqTCXG1JAV6fb2h2HT8MT7kGU=", "192.30.252.130"},
{"$known_hosts$|1|vAQX51f9EfXY33/j3upxFIlI1ds=|q+CzSLaa1EaSsAQzP/XRM/gaFQ4=", "192.30.252.128"},
{"$known_hosts$|1|F1E1KeoE/eEWhi10WpGv4OdiO6Y=|3988QV0VE8wmZL7suNrYQLITLCg=", "192.168.1.61"},
{NULL}
};
/* Per-candidate plaintext buffer, allocated in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Per-candidate 20-byte SHA-1 output, allocated in init(). */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Precomputed first HMAC block for both pads (see get_salt()); lets
 * crypt_all() resume from these contexts instead of rehashing the pads. */
static struct custom_salt {
SHA_CTX ipad_ctx;
SHA_CTX opad_ctx;
} *cur_salt;
/* One-time format setup: scale the keys-per-crypt window for OpenMP and
 * allocate the shared key/output buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	/* Oversubscribe by OMP_SCALE so each thread gets a long run of work. */
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release buffers allocated in init(); MEM_FREE also NULLs the pointers. */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
/* Validate one candidate hash line.
 * Expected shape: "$known_hosts$|1|<28 base64 chars>|<28 base64 chars>".
 * Returns 1 if the line is well-formed, 0 otherwise.
 * Fixes vs. the previous version: reject a missing second '|' (strchr()
 * returning NULL previously fed NULL into pointer arithmetic), and check
 * the '1' of the "|1|" magic explicitly, which also prevents reading one
 * byte past the terminator when the string ends right after the first '|'. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;
	p = ciphertext + TAG_LENGTH;
	/* hashed known_hosts entries always start with the "|1|" magic
	 * (HASH_MAGIC/HASH_DELIM in OpenSSH) */
	if (p[0] != '|' || p[1] != '1' || p[2] != '|')
		return 0;
	p += 3;
	q = strchr(p, '|');
	if (!q)
		return 0;
	if (q - p != BINARY_ENCODED_SIZE)
		return 0;
	p = strrchr(ciphertext, '|') + 1;
	if (strlen(p) != BINARY_ENCODED_SIZE)
		return 0;
	return 1;
}
/* Decode the base64 salt and precompute the first SHA-1 block of both HMAC
 * pads.  Per the header note, OpenSSH uses the salt as the HMAC *key* and the
 * host as the message, so the ipad/opad halves depend only on the salt and can
 * be computed once per salt, halving the per-candidate work in crypt_all().
 * The HMAC key is the 20-byte salt zero-padded to the 64-byte SHA-1 block:
 * bytes 0-19 are salt^0x36 (or ^0x5C), bytes 20-63 are 0x00^0x36 = 0x36
 * (or 0x5C) -- hence the literal runs of 44 pad bytes below.
 * Returns a pointer to a static struct custom_salt (copied by the caller). */
static void *get_salt(char *ciphertext)
{
char *p, *q;
unsigned char ipad[20], opad[20], salt[20 + 4 + 1];
int i;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
/* skip "$known_hosts$|1|"; salt is the base64 run up to the next '|' */
p = ciphertext + TAG_LENGTH + 3;
q = strchr(p, '|');
base64_decode(p, q - p, (char*)salt);
for (i = 0; i < 20; ++i) {
ipad[i] = salt[i] ^ 0x36;
opad[i] = salt[i] ^ 0x5C;
}
SHA1_Init(&cs.ipad_ctx);
SHA1_Update(&cs.ipad_ctx, ipad, 20);
/* remaining 44 key-pad bytes of the 64-byte block (zero key bytes ^ 0x36) */
SHA1_Update(&cs.ipad_ctx, "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 44);
SHA1_Init(&cs.opad_ctx);
SHA1_Update(&cs.opad_ctx, opad, 20);
/* remaining 44 key-pad bytes of the 64-byte block (zero key bytes ^ 0x5C) */
SHA1_Update(&cs.opad_ctx, "\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 44);
return (void *)&cs;
}
/* Decode the base64 digest (everything after the last '|') into a static,
 * word-aligned buffer and return it. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE + 1 + 4];  /* slack for decoder overrun */
		ARCH_WORD dummy;                       /* force word alignment */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	p = strrchr(ciphertext, '|') + 1;
	base64_decode((char*)p, BINARY_ENCODED_SIZE, (char*)out);
	return out;
}
/* Cheap partial-hash accessors over the first 32 bits of each digest,
 * used by the cracker's hash tables at increasing resolutions. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Select the salt (pre-seeded HMAC contexts) for subsequent crypt_all(). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Compute HMAC-SHA1(salt-key, candidate) for every queued candidate.
 * The inner/outer SHA-1 states are cloned from the contexts prepared in
 * get_salt(), so only the variable tail of each HMAC is hashed here. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		SHA_CTX ctx;
		/* inner hash: H((K^ipad) || message) */
		memcpy(&ctx, &cur_salt->ipad_ctx, sizeof(ctx));
		SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		SHA1_Final((unsigned char*) crypt_out[index], &ctx);
		/* outer hash: H((K^opad) || inner digest) */
		memcpy(&ctx, &cur_salt->opad_ctx, sizeof(ctx));
		SHA1_Update(&ctx, crypt_out[index], BINARY_SIZE);
		SHA1_Final((unsigned char*) crypt_out[index], &ctx);
	}
	return count;
}
/* Quick check whether any computed digest matches the target binary.
 * Only ARCH_SIZE bytes are compared here; cmp_one() confirms with the
 * full BINARY_SIZE.
 * NOTE(review): without _OPENMP the for-header is compiled out and only
 * index 0 is examined -- presumably MAX_KEYS_PER_CRYPT is 1 in that
 * configuration; verify against the format's constants. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full-width comparison of one computed digest against the target. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* The full digest is already stored in crypt_out, so cmp_one() is
 * conclusive and no extra verification is needed here. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void known_hosts_set_key(char *key, int index)
{
int len = strlen(key);
memcpy(saved_key[index], key, len);
saved_key[index][len] = 0;
}
/* Return the plaintext previously stored for this slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor wiring the known_hosts methods into John the Ripper. */
struct fmt_main fmt_known_hosts = {
	{ /* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		known_hosts_tests
	}, { /* method table */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{ /* binary_hash[] */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		known_hosts_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash[] */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unop__identity_uint16_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint16_int32
// op(A') function: GB_unop_tran__identity_uint16_int32
// C type: uint16_t
// A type: int32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary operator, casting each
// int32_t entry of A to uint16_t.  Cx and Ax may alias (in-place cast).
// Returns GrB_NO_VALUE when this kernel is disabled at compile time.
GrB_Info GB_unop_apply__identity_uint16_int32
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // cij = (uint16_t) aij
        Cx [k] = (uint16_t) Ax [k] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The actual loops live in the shared template GB_unop_transpose.c, which
// is instantiated here via the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__identity_uint16_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
distance.c | #include "image.h"
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
const HEMAN_FLOAT INF = 1E20;
#define NEW(t, n) calloc(n, sizeof(t))
#define SDISTFIELD_TEXEL(x, y) (*(sdf->data + y * width + x))
#define COORDFIELD_TEXEL(x, y, c) (*(cf->data + 2 * (y * width + x) + c))
/* 1-D squared-Euclidean distance transform of a sampled function
 * (Felzenszwalb & Huttenlocher lower-envelope-of-parabolas algorithm).
 *   f: input values (n entries)        d: output squared distances
 *   z: envelope boundary positions     w: envelope parabola vertices
 * z needs n+1 slots; w needs n.
 * NOTE(review): assumes n >= 1 -- confirm callers never pass n == 0. */
static void edt(
    HEMAN_FLOAT* f, HEMAN_FLOAT* d, HEMAN_FLOAT* z, uint16_t* w, int n)
{
    int k = 0;       /* index of rightmost parabola in the envelope */
    HEMAN_FLOAT s;   /* horizontal position where parabolas intersect */
    w[0] = 0;
    z[0] = -INF;
    z[1] = +INF;
    /* Pass 1: build the lower envelope of the parabolas rooted at each q. */
    for (int q = 1; q < n; ++q) {
        s = ((f[q] + SQR(q)) - (f[w[k]] + SQR(w[k]))) / (2 * q - 2 * w[k]);
        while (s <= z[k]) {
            /* new parabola dominates the previous one: pop it */
            --k;
            s = ((f[q] + SQR(q)) - (f[w[k]] + SQR(w[k]))) / (2 * q - 2 * w[k]);
        }
        w[++k] = q;
        z[k] = s;
        z[k + 1] = +INF;
    }
    /* Pass 2: evaluate the envelope at each sample position. */
    k = 0;
    for (int q = 0; q < n; ++q) {
        while (z[k + 1] < q) {
            ++k;
        }
        d[q] = SQR(q - w[k]) + f[w[k]];
    }
}
/* Same 1-D distance transform as edt(), but additionally propagates a
 * 2-component payload (here: nearest-seed coordinates): each output
 * position q receives the payload of the envelope parabola w[k] that
 * determines its distance.  payload_in/payload_out hold n interleaved
 * (x, y) pairs. */
static void edt_with_payload(HEMAN_FLOAT* f, HEMAN_FLOAT* d, HEMAN_FLOAT* z,
    uint16_t* w, int n, HEMAN_FLOAT* payload_in, HEMAN_FLOAT* payload_out)
{
    int k = 0;       /* index of rightmost parabola in the envelope */
    HEMAN_FLOAT s;   /* parabola intersection position */
    w[0] = 0;
    z[0] = -INF;
    z[1] = +INF;
    /* Pass 1: build the lower envelope (identical to edt). */
    for (int q = 1; q < n; ++q) {
        s = ((f[q] + SQR(q)) - (f[w[k]] + SQR(w[k]))) / (2 * q - 2 * w[k]);
        while (s <= z[k]) {
            --k;
            s = ((f[q] + SQR(q)) - (f[w[k]] + SQR(w[k]))) / (2 * q - 2 * w[k]);
        }
        w[++k] = q;
        z[k] = s;
        z[k + 1] = +INF;
    }
    /* Pass 2: evaluate the envelope and copy the winner's payload. */
    k = 0;
    for (int q = 0; q < n; ++q) {
        while (z[k + 1] < q) {
            ++k;
        }
        d[q] = SQR(q - w[k]) + f[w[k]];
        payload_out[q * 2] = payload_in[w[k] * 2];
        payload_out[q * 2 + 1] = payload_in[w[k] * 2 + 1];
    }
}
/* 2-D squared distance transform, in place: runs the 1-D edt() first down
 * every column, then across every row.  Scratch buffers are sized for the
 * whole image so each OpenMP iteration works on its own disjoint slice
 * (column x or row y) without synchronization.
 * NOTE(review): allocation results are not checked -- NULL on OOM would
 * crash; confirm that is acceptable for this library. */
static void transform_to_distance(heman_image* sdf)
{
    int width = sdf->width;
    int height = sdf->height;
    int size = width * height;
    HEMAN_FLOAT* ff = NEW(HEMAN_FLOAT, size);
    HEMAN_FLOAT* dd = NEW(HEMAN_FLOAT, size);
    HEMAN_FLOAT* zz = NEW(HEMAN_FLOAT, (height + 1) * (width + 1));
    uint16_t* ww = NEW(uint16_t, size);
    int x;
    /* vertical pass: one 1-D transform per column */
#pragma omp parallel for
    for (x = 0; x < width; ++x) {
        HEMAN_FLOAT* f = ff + height * x;
        HEMAN_FLOAT* d = dd + height * x;
        HEMAN_FLOAT* z = zz + (height + 1) * x;
        uint16_t* w = ww + height * x;
        for (int y = 0; y < height; ++y) {
            f[y] = SDISTFIELD_TEXEL(x, y);
        }
        edt(f, d, z, w, height);
        for (int y = 0; y < height; ++y) {
            SDISTFIELD_TEXEL(x, y) = d[y];
        }
    }
    int y;
    /* horizontal pass: one 1-D transform per row */
#pragma omp parallel for
    for (y = 0; y < height; ++y) {
        HEMAN_FLOAT* f = ff + width * y;
        HEMAN_FLOAT* d = dd + width * y;
        HEMAN_FLOAT* z = zz + (width + 1) * y;
        uint16_t* w = ww + width * y;
        for (int x = 0; x < width; ++x) {
            f[x] = SDISTFIELD_TEXEL(x, y);
        }
        edt(f, d, z, w, width);
        for (int x = 0; x < width; ++x) {
            SDISTFIELD_TEXEL(x, y) = d[x];
        }
    }
    free(ff);
    free(dd);
    free(zz);
    free(ww);
}
/* Same two-pass 2-D transform as transform_to_distance(), but also drags a
 * 2-band coordinate field (cf) along via edt_with_payload(), so that after
 * both passes each texel of cf holds the coordinates of its nearest seed.
 * The pl1/pl2 payload staging buffers are allocated inside the parallel
 * loops, making them private to each OpenMP iteration. */
static void transform_to_coordfield(heman_image* sdf, heman_image* cf)
{
    int width = sdf->width;
    int height = sdf->height;
    int size = width * height;
    HEMAN_FLOAT* ff = NEW(HEMAN_FLOAT, size);
    HEMAN_FLOAT* dd = NEW(HEMAN_FLOAT, size);
    HEMAN_FLOAT* zz = NEW(HEMAN_FLOAT, (height + 1) * (width + 1));
    uint16_t* ww = NEW(uint16_t, size);
    int x;
    /* vertical pass */
#pragma omp parallel for
    for (x = 0; x < width; ++x) {
        HEMAN_FLOAT* pl1 = NEW(HEMAN_FLOAT, height * 2);
        HEMAN_FLOAT* pl2 = NEW(HEMAN_FLOAT, height * 2);
        HEMAN_FLOAT* f = ff + height * x;
        HEMAN_FLOAT* d = dd + height * x;
        HEMAN_FLOAT* z = zz + (height + 1) * x;
        uint16_t* w = ww + height * x;
        for (int y = 0; y < height; ++y) {
            f[y] = SDISTFIELD_TEXEL(x, y);
            pl1[y * 2] = COORDFIELD_TEXEL(x, y, 0);
            pl1[y * 2 + 1] = COORDFIELD_TEXEL(x, y, 1);
        }
        edt_with_payload(f, d, z, w, height, pl1, pl2);
        for (int y = 0; y < height; ++y) {
            SDISTFIELD_TEXEL(x, y) = d[y];
            COORDFIELD_TEXEL(x, y, 0) = pl2[2 * y];
            COORDFIELD_TEXEL(x, y, 1) = pl2[2 * y + 1];
        }
        free(pl1);
        free(pl2);
    }
    int y;
    /* horizontal pass */
#pragma omp parallel for
    for (y = 0; y < height; ++y) {
        HEMAN_FLOAT* pl1 = NEW(HEMAN_FLOAT, width * 2);
        HEMAN_FLOAT* pl2 = NEW(HEMAN_FLOAT, width * 2);
        HEMAN_FLOAT* f = ff + width * y;
        HEMAN_FLOAT* d = dd + width * y;
        HEMAN_FLOAT* z = zz + (width + 1) * y;
        uint16_t* w = ww + width * y;
        for (int x = 0; x < width; ++x) {
            f[x] = SDISTFIELD_TEXEL(x, y);
            pl1[x * 2] = COORDFIELD_TEXEL(x, y, 0);
            pl1[x * 2 + 1] = COORDFIELD_TEXEL(x, y, 1);
        }
        edt_with_payload(f, d, z, w, width, pl1, pl2);
        for (int x = 0; x < width; ++x) {
            SDISTFIELD_TEXEL(x, y) = d[x];
            COORDFIELD_TEXEL(x, y, 0) = pl2[2 * x];
            COORDFIELD_TEXEL(x, y, 1) = pl2[2 * x + 1];
        }
        free(pl1);
        free(pl2);
    }
    free(ff);
    free(dd);
    free(zz);
    free(ww);
}
/* Build a signed distance field from a 1-band seed image: distance to the
 * nearest nonzero texel minus distance to the nearest zero texel, scaled
 * by 1/width.  Caller owns the returned image. */
heman_image* heman_distance_create_sdf(heman_image* src)
{
    assert(src->nbands == 1 && "Distance field input must have only 1 band.");
    heman_image* positive = heman_image_create(src->width, src->height, 1);
    heman_image* negative = heman_image_create(src->width, src->height, 1);
    int size = src->height * src->width;
    HEMAN_FLOAT* pptr = positive->data;
    HEMAN_FLOAT* nptr = negative->data;
    HEMAN_FLOAT* sptr = src->data;
    /* seed: INF where we need a distance, 0 at the sources */
    for (int i = 0; i < size; ++i, ++sptr) {
        *pptr++ = *sptr ? INF : 0;
        *nptr++ = *sptr ? 0 : INF;
    }
    transform_to_distance(positive);
    transform_to_distance(negative);
    HEMAN_FLOAT inv = 1.0f / src->width;
    pptr = positive->data;
    nptr = negative->data;
    /* signed distance = outside - inside (transforms return squared dist) */
    for (int i = 0; i < size; ++i, ++pptr, ++nptr) {
        *pptr = (sqrt(*pptr) - sqrt(*nptr)) * inv;
    }
    heman_image_destroy(negative);
    return positive;
}
/* Build an unsigned distance field: distance to the nearest nonzero texel
 * of the 1-band seed image, scaled by 1/width.  Caller owns the result. */
heman_image* heman_distance_create_df(heman_image* src)
{
    assert(src->nbands == 1 && "Distance field input must have only 1 band.");
    heman_image* positive = heman_image_create(src->width, src->height, 1);
    int size = src->height * src->width;
    HEMAN_FLOAT* pptr = positive->data;
    HEMAN_FLOAT* sptr = src->data;
    /* seed: 0 at sources (nonzero texels), INF elsewhere */
    for (int i = 0; i < size; ++i, ++sptr) {
        *pptr++ = *sptr ? 0 : INF;
    }
    transform_to_distance(positive);
    HEMAN_FLOAT inv = 1.0f / src->width;
    pptr = positive->data;
    /* convert squared distances to scaled Euclidean distances */
    for (int i = 0; i < size; ++i, ++pptr) {
        *pptr = sqrt(*pptr) * inv;
    }
    return positive;
}
/* Create a 2-band coordinate field where every texel holds its own (x, y)
 * position -- the identity closest-point field.  Caller owns the result. */
heman_image* heman_distance_identity_cpcf(int width, int height)
{
    heman_image* retval = heman_image_create(width, height, 2);
    HEMAN_FLOAT* cdata = retval->data;
    int ncells = width * height;
    for (int cell = 0; cell < ncells; ++cell) {
        *cdata++ = cell % width;  /* x */
        *cdata++ = cell / width;  /* y */
    }
    return retval;
}
/* Build a closest-point coordinate field: each texel of the result holds
 * the (x, y) of the nearest seed texel (any texel of src whose band sum is
 * nonzero).  Works for any band count.  Caller owns the result. */
heman_image* heman_distance_create_cpcf(heman_image* src)
{
    heman_image* negative = heman_image_create(src->width, src->height, 1);
    int size = src->height * src->width;
    HEMAN_FLOAT* nptr = negative->data;
    HEMAN_FLOAT* sptr = src->data;
    /* seed mask: 0 where any band is nonzero, INF elsewhere */
    for (int i = 0; i < size; ++i) {
        HEMAN_FLOAT val = 0;
        for (int b = 0; b < src->nbands; ++b) {
            val += *sptr++;
        }
        *nptr++ = val ? 0 : INF;
    }
    heman_image* coordfield = heman_distance_identity_cpcf(src->width, src->height);
    transform_to_coordfield(negative, coordfield);
    heman_image_destroy(negative);
    return coordfield;
}
/* Derive an unsigned distance field from a closest-point coordinate field:
 * for each texel, the Euclidean distance to its stored (u, v) coordinate,
 * normalized by the image diagonal.  Caller owns the result. */
heman_image* heman_distance_from_cpcf(heman_image* cf)
{
    assert(cf->nbands == 2 && "Coordinate field input must have 2 bands.");
    heman_image* udf = heman_image_create(cf->width, cf->height, 1);
    HEMAN_FLOAT* dptr = udf->data;
    HEMAN_FLOAT* sptr = cf->data;
    HEMAN_FLOAT scale = 1.0f / sqrt(SQR(cf->width) + SQR(cf->height));
    for (int y = 0; y < cf->height; y++) {
        for (int x = 0; x < cf->width; x++) {
            HEMAN_FLOAT u = *sptr++;
            HEMAN_FLOAT v = *sptr++;
            HEMAN_FLOAT dist = sqrt(SQR(u - x) + SQR(v - y)) * scale;
            *dptr++ = dist;
        }
    }
    return udf;
}
|
sincos_scalar.c | /*************************************************************************************/
/* Copyright (C) 2021 Intel Corporation */
/* */
/* Redistribution and use in source and binary forms, with or without modification, */
/* are permitted provided that the following conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above copyright notice, */
/* this list of conditions and the following disclaimer. */
/* 2. Redistributions in binary form must reproduce the above copyright notice, */
/* this list of conditions and the following disclaimer in the documentation */
/* and/or other materials provided with the distribution. */
/* 3. Neither the name of the copyright holder nor the names of its contributors */
/* may be used to endorse or promote products derived from this software */
/* without specific prior written permission. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" */
/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, */
/* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE */
/* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS */
/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, */
/* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT */
/* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; */
/* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, */
/* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE */
/* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, */
/* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* */
/* */
/* SPDX-License-Identifier: BSD-3-Clause */
/*************************************************************************************/
#include "bench.h"
//#define _GNU_SOURCE
//#include <math.h>
#define FLOAT double
#define SINCOS
#define FUNCTION_LOOP \
for (unsigned int i = 0; i < loopCount; i++) \
{ \
for (unsigned int j = i * dim; j < (i + 1) * dim; j++) \
{ \
sincos(input_array[j], &result_array[j], &result_array1[j]); \
} \
}
//#pragma omp declare simd notinbranch linear(y, z)
//void sincos(double x, double *y, double *z);
#define RANGE1 rand_in_range(-360.0, 360.0)
#include "bench.c" |
reciprocal_to_normal.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "reciprocal_to_normal.h"
#include <math.h>
#include <stdlib.h>
#include "lapack_wrapper.h"
#ifdef MEASURE_R2N
#include <time.h>
#include <unistd.h>
#endif
static double get_fc3_sum(const lapack_complex_double *e0,
const lapack_complex_double *e1,
const lapack_complex_double *e2,
const lapack_complex_double *fc3_reciprocal,
const long num_band);
/* Project the reciprocal-space fc3 tensor onto phonon normal modes and
 * store |<e0 e1 e2 | fc3>|^2 / (w0 w1 w2) for each requested (band0,
 * band1, band2) triplet listed in g_pos.  Triplets containing a frequency
 * at or below cutoff_frequency get 0.  Eigenvectors are transposed and
 * pre-scaled by 1/sqrt(mass) so the inner get_fc3_sum loop is a plain
 * contiguous triple contraction. */
void reciprocal_to_normal_squared(
    double *fc3_normal_squared, const long (*g_pos)[4], const long num_g_pos,
    const lapack_complex_double *fc3_reciprocal, const double *freqs0,
    const double *freqs1, const double *freqs2,
    const lapack_complex_double *eigvecs0,
    const lapack_complex_double *eigvecs1,
    const lapack_complex_double *eigvecs2, const double *masses,
    const long *band_indices, const long num_band,
    const double cutoff_frequency, const long openmp_at_bands) {
    long i, j, k, num_atom;
    double real, imag;
    double *inv_sqrt_masses;
    lapack_complex_double *e0, *e1, *e2;

    /* Transpose eigenvectors for the better data alignment in memory. */
    /* Memory space for three eigenvector matrices is allocated at once */
    /* to make it contiguous. */
    e0 = (lapack_complex_double *)malloc(sizeof(lapack_complex_double) * 3 *
                                         num_band * num_band);
    e1 = e0 + num_band * num_band;
    e2 = e1 + num_band * num_band;
    for (i = 0; i < num_band; i++) {
        for (j = 0; j < num_band; j++) {
            e0[j * num_band + i] = eigvecs0[i * num_band + j];
            e1[j * num_band + i] = eigvecs1[i * num_band + j];
            e2[j * num_band + i] = eigvecs2[i * num_band + j];
        }
    }

    /* Inverse sqrt mass is multipled with eigenvectors to reduce number of */
    /* operations in get_fc3_sum. Three eigenvector matrices are looped by */
    /* first loop leveraging contiguous memory layout of [e0, e1, e2]. */
    num_atom = num_band / 3;
    inv_sqrt_masses = (double *)malloc(sizeof(double) * num_atom);
    for (i = 0; i < num_atom; i++) {
        inv_sqrt_masses[i] = 1.0 / sqrt(masses[i]);
    }
    /* i runs over the rows of all three matrices at once (3 * num_band),
     * indexing through e0 into e1 and e2 thanks to the single allocation */
    for (i = 0; i < 3 * num_band; i++) {
        for (j = 0; j < num_atom; j++) {
            for (k = 0; k < 3; k++) {
                real = lapack_complex_double_real(e0[i * num_band + j * 3 + k]);
                imag = lapack_complex_double_imag(e0[i * num_band + j * 3 + k]);
                e0[i * num_band + j * 3 + k] = lapack_make_complex_double(
                    real * inv_sqrt_masses[j], imag * inv_sqrt_masses[j]);
            }
        }
    }
    free(inv_sqrt_masses);
    inv_sqrt_masses = NULL;

#ifdef MEASURE_R2N
    double loopTotalCPUTime, loopTotalWallTime;
    time_t loopStartWallTime;
    clock_t loopStartCPUTime;
#endif
#ifdef MEASURE_R2N
    loopStartWallTime = time(NULL);
    loopStartCPUTime = clock();
#endif

    /* each g_pos entry is independent: (band0, band1, band2, output index) */
#ifdef _OPENMP
#pragma omp parallel for if (openmp_at_bands)
#endif
    for (i = 0; i < num_g_pos; i++) {
        if (freqs0[band_indices[g_pos[i][0]]] > cutoff_frequency &&
            freqs1[g_pos[i][1]] > cutoff_frequency &&
            freqs2[g_pos[i][2]] > cutoff_frequency) {
            fc3_normal_squared[g_pos[i][3]] =
                get_fc3_sum(e0 + band_indices[g_pos[i][0]] * num_band,
                            e1 + g_pos[i][1] * num_band,
                            e2 + g_pos[i][2] * num_band, fc3_reciprocal,
                            num_band) /
                (freqs0[band_indices[g_pos[i][0]]] * freqs1[g_pos[i][1]] *
                 freqs2[g_pos[i][2]]);
        } else {
            fc3_normal_squared[g_pos[i][3]] = 0;
        }
    }

#ifdef MEASURE_R2N
    loopTotalCPUTime = (double)(clock() - loopStartCPUTime) / CLOCKS_PER_SEC;
    loopTotalWallTime = difftime(time(NULL), loopStartWallTime);
    printf("  %1.3fs (%1.3fs CPU)\n", loopTotalWallTime, loopTotalCPUTime);
#endif

    /* e1 and e2 point into the e0 allocation: only e0 is freed */
    free(e0);
    e0 = NULL;
    e1 = NULL;
    e2 = NULL;
}
/* Triple contraction sum_{ijk} e0_i e1_j e2_k fc3[i][j][k]; returns the
 * squared magnitude of the complex result.  All inputs are length num_band
 * vectors (e0..e2) and a num_band^3 tensor (fc3_reciprocal). */
static double get_fc3_sum(const lapack_complex_double *e0,
                          const lapack_complex_double *e1,
                          const lapack_complex_double *e2,
                          const lapack_complex_double *fc3_reciprocal,
                          const long num_band) {
    long i, j, k;
    double sum_real, sum_imag;
    lapack_complex_double e_01, e_012, e_012_fc3;

    sum_real = 0;
    sum_imag = 0;
    for (i = 0; i < num_band; i++) {
        for (j = 0; j < num_band; j++) {
            e_01 = phonoc_complex_prod(e0[i], e1[j]);  /* hoisted: independent of k */
            for (k = 0; k < num_band; k++) {
                e_012 = phonoc_complex_prod(e_01, e2[k]);
                e_012_fc3 = phonoc_complex_prod(
                    e_012,
                    fc3_reciprocal[i * num_band * num_band + j * num_band + k]);
                sum_real += lapack_complex_double_real(e_012_fc3);
                sum_imag += lapack_complex_double_imag(e_012_fc3);
            }
        }
    }
    return (sum_real * sum_real + sum_imag * sum_imag);
}
|
nested_parallel_for_omp.c | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See LICENSE.txt in top-level directory.
*/
/* Nested Pragma omp parallel for directives evaluation
* Output: avg time
*/
#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define NUM_ELEMS 5017600 /* 2GB */
#define NUM_REPS 1
/* Vector initialization */
/* Fill v[0..n-1] with the benchmark's reference pattern i + 100.0f. */
void init(float *v, int n)
{
    for (int idx = 0; idx < n; ++idx)
        v[idx] = 100.0f + (float)idx;
}
/* Called after each test to be sure that the compiler does
not avoid to execute the test */
/* Verify the result of the benchmark loops (each element should be the
 * init() pattern scaled by 0.9f); print any element that deviates so the
 * compiler cannot dead-code-eliminate the timed work. */
void check(float *v, int n)
{
    for (int idx = 0; idx < n; ++idx) {
        const float expected = (idx + 100.0f) * 0.9f;
        if (v[idx] != expected) {
            printf("v[%d]<=0.0f\n", idx);
        }
    }
}
/* Benchmark driver: times `rep` repetitions of a nested
 * `parallel for` over an it x it grid (n is rounded up to a perfect
 * square), then prints "<outer threads> <inner threads> <n> <avg time>".
 * argv: [1] element count, [2] inner thread count, [3] repetitions. */
int main(int argc, char *argv[])
{
    int i, j, r, nthreads;
    double *time, avg_time = 0.0;
    float *v;

    /* discover the default team size (read after the implicit barrier
     * at the end of the parallel region) */
    #pragma omp parallel
    {
        #pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }
    int n = (argc > 1) ? atoi(argv[1]) : NUM_ELEMS;
    int in_th = (argc > 2) ? atoi(argv[2]) : nthreads;
    int rep = (argc > 3) ? atoi(argv[3]) : NUM_REPS;
    int it = ceil(sqrt((double)n));
    n = it * it;  /* round up to a perfect square */
    time = (double *)malloc(sizeof(double) * rep);
    v = (float *)malloc(sizeof(float) * n);
    init(v, n);
    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
        #pragma omp parallel for
        for (j = 0; j < it; j++) {
            /* request in_th threads for the nested region below */
            omp_set_num_threads(in_th);
            #pragma omp parallel for
            for (i = 0; i < it; i++) {
                v[j * it + i] *= 0.9f;
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }
    avg_time /= rep;
    check(v, n);
    printf("%d %d %d %f\n", nthreads, in_th, n, avg_time);
    free(time);
    free(v);
    return EXIT_SUCCESS;
}
|
omp_getEnvInfo.c | /******************************************************************************
* FILE: omp_getEnvInfo.c
* DESCRIPTION:
* OpenMP Example - Get Environment Information - C/C++ Version
* The master thread queries and prints selected environment information.
* AUTHOR: Blaise Barney 7/06
* LAST REVISED: 07/12/06
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Spawn a parallel region and have the master thread (tid 0) query and
 * print the OpenMP runtime environment settings. */
int main (int argc, char *argv[])
{
    int nthreads, tid, procs, maxt, inpar, dynamic, nested;

    /* Start parallel region */
    #pragma omp parallel private(nthreads, tid)
    {
        /* Obtain thread number */
        tid = omp_get_thread_num();

        /* Only master thread does this */
        if (tid == 0)
        {
            printf("Thread %d getting environment info...\n", tid);

            /* Get environment information */
            procs = omp_get_num_procs();
            nthreads = omp_get_num_threads();
            maxt = omp_get_max_threads();
            inpar = omp_in_parallel();
            dynamic = omp_get_dynamic();
            nested = omp_get_nested();

            /* Print environment information */
            printf("Number of processors = %d\n", procs);
            printf("Number of threads = %d\n", nthreads);
            printf("Max threads = %d\n", maxt);
            printf("In parallel? = %d\n", inpar);
            printf("Dynamic threads enabled? = %d\n", dynamic);
            printf("Nested parallelism supported? = %d\n", nested);
        }
    }  /* Done */
}
|
OpenMP-check.c | #include <stdio.h>
#include <omp.h>
/* OpenMP smoke test: prints one line per thread in the parallel region
 * (a single line if OpenMP is disabled). */
int main(void) {
#pragma omp parallel
	{
		printf("I'm a parallel region.\n");
	}
	return 0;
}
|
spmmd_x_csr_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
/* Dense C = A * B for CSR inputs A and B; C is row-major with leading
 * dimension ldc.  Rows of A are partitioned across threads by flop count
 * (prefix sum of per-row work) so each thread receives a similar load. */
alphasparse_status_t ONAME(const ALPHA_SPMAT_CSR *matA, const ALPHA_SPMAT_CSR *matB, ALPHA_Number *matC, const ALPHA_INT ldc)
{
    if (matA->cols != matB->rows || ldc < matB->cols)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    ALPHA_INT m = matA->rows;
    /* zero the output block */
    for(ALPHA_INT i = 0; i < matA->rows; i++)
        for(ALPHA_INT j = 0; j < matB->cols; j++)
        {
            alpha_setzero(matC[index2(i, j, ldc)]);
        }
    ALPHA_INT num_thread = alpha_get_thread_num();
    /* NOTE(review): VLA of m ALPHA_INT64 on the stack -- could overflow
     * the stack for very tall A; confirm expected matrix sizes. */
    ALPHA_INT64 flop[m];
    memset(flop, '\0', m * sizeof(ALPHA_INT64));
    /* per-row flop estimate: total B-row lengths touched by row ar of A */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT ar = 0; ar < m; ar++)
    {
        for (ALPHA_INT ai = matA->rows_start[ar]; ai < matA->rows_end[ar]; ai++)
        {
            ALPHA_INT br = matA->col_indx[ai];
            flop[ar] += matB->rows_end[br] - matB->rows_start[br];
        }
    }
    /* inclusive prefix sum, then split rows into equal-work ranges */
    for (ALPHA_INT i = 1; i < m; i++)
    {
        flop[i] += flop[i - 1];
    }
    ALPHA_INT partition[num_thread + 1];
    balanced_partition_row_by_flop(flop, m, num_thread, partition);
#ifdef _OPENMP
#pragma omp parallel num_threads(num_thread)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT local_m_s = partition[tid];
        ALPHA_INT local_m_e = partition[tid + 1];
        /* classic row-by-row CSR x CSR product into dense C */
        for (ALPHA_INT ar = local_m_s; ar < local_m_e; ar++)
        {
            for (ALPHA_INT ai = matA->rows_start[ar]; ai < matA->rows_end[ar]; ai++)
            {
                ALPHA_INT br = matA->col_indx[ai];
                ALPHA_Number av = matA->values[ai];
                for (ALPHA_INT bi = matB->rows_start[br]; bi < matB->rows_end[br]; bi++)
                {
                    ALPHA_INT bc = matB->col_indx[bi];
                    ALPHA_Number bv = matB->values[bi];
                    alpha_madde(matC[index2(ar, bc, ldc)], av, bv);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
LAGraph_dense_relabel.c | //------------------------------------------------------------------------------
// LAGraph_dense_relabel: dense relabeling of ids to matrix indices
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_dense_relabel: relabel sparse IDs to dense row/column indices
// Contributed by Marton Elekes and Gabor Szarnyas,
// Budapest University of Technology and Economics
// (with accented characters: M\'{a}rton Elekes and G\'{a}bor Sz\'{a}rnyas.
// Converts array of sparse IDs (ids) to row/column indices between 0...(nids-1).
// The order of IDs is kept, therefore ids can be used for index -> ID conversion: ids[index]=id.
//
// Gives back two binary matrices for conversion between ID- and index-based vertices.
// id2index vector can be used to look up for indices of chosen IDs.
// id_dimension gives back the height of Id2index matrix and id2index vector. (Same as width of Index2id_handle matrix.)
// id_dimension is the size that can store the largest ID in the array.
// Currently it is the largest valid dimension in SuiteSparse:GraphBLAS (GxB_INDEX_MAX)
//
// Find usage example in /Test/DenseRelabel/dense_relabel_test.c
#include "LAGraph_internal.h"
#include <string.h>
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (Id2index_handle) ; \
GrB_free (Index2id_handle) ; \
GrB_free (id2index_handle) ; \
LAGRAPH_FREE (indices) ; \
LAGRAPH_FREE (true_values) ; \
}
//------------------------------------------------------------------------------
/* Relabel an array of unique sparse IDs to dense indices 0..(nids-1),
 * preserving order (so ids[] itself serves as the index->ID map).
 * Optionally builds: Id2index (A(id,index)=1), Index2id (B(index,id)=1),
 * and the vector id2index (v(id)=index).  Any of the three output handles
 * may be NULL; at least one must be given.  On error, LAGRAPH_ERROR
 * invokes LAGRAPH_FREE_ALL (defined above) to release partial results. */
GrB_Info LAGraph_dense_relabel   // relabel sparse IDs to dense row/column indices
(
    GrB_Matrix *Id2index_handle,  // output matrix: A(id, index)=1 (unfilled if NULL)
    GrB_Matrix *Index2id_handle,  // output matrix: B(index, id)=1 (unfilled if NULL)
    GrB_Vector *id2index_handle,  // output vector: v(id)=index (unfilled if NULL)
    const GrB_Index *ids,         // array of unique identifiers (under GxB_INDEX_MAX)
    GrB_Index nids,               // number of identifiers
    GrB_Index *id_dimension       // number of rows in Id2index matrix, id2index vector (unfilled if NULL)
) {
    GrB_Index *indices = NULL;
    bool *true_values = NULL;

    // thread count heuristic from LAGraph_1_to_n.c: ~4096 elements/thread
    int nthreads = LAGraph_get_nthreads();
    nthreads = LAGRAPH_MIN ((int64_t) (nids / 4096), nthreads);
    nthreads = LAGRAPH_MAX (nthreads, 1);

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------
    if (!Id2index_handle && !Index2id_handle && !id2index_handle) {
        LAGRAPH_ERROR ("All output mapping arguments are NULL", GrB_NULL_POINTER);
    }
    if (!ids) {
        LAGRAPH_ERROR ("ids is NULL", GrB_NULL_POINTER);
    }

    // the largest valid dimension in SuiteSparse:GraphBLAS
    GrB_Index id_max_dimension = GxB_INDEX_MAX;
    if (id_dimension)
        *id_dimension = id_max_dimension;

    // set indices 0..(nids-1)
    indices = LAGraph_malloc(nids, sizeof(*indices));
    if (!indices) {
        LAGRAPH_ERROR ("Out of Memory", GrB_OUT_OF_MEMORY);
    }
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (size_t i = 0; i < nids; ++i) {
        indices[i] = i;
    }

    // build vector id2index(original_id) = index
    if (id2index_handle) {
        LAGr_Vector_new(id2index_handle, GrB_UINT64, id_max_dimension);
        LAGr_Vector_build(*id2index_handle, ids, indices, nids, GrB_SECOND_UINT64);
    }

    if (Id2index_handle || Index2id_handle) {
        // initialize true values of the matrix
        true_values = LAGraph_malloc(nids, sizeof(*true_values));
        if (!true_values) {
            LAGRAPH_ERROR ("Out of Memory", GrB_OUT_OF_MEMORY);
        }
        // fills each byte with 1 (true); valid since sizeof(bool) == 1 here
        memset(true_values, true, nids * sizeof(*true_values));

        // build matrix Index2id(index, original_id) = 1
        if (Index2id_handle) {
            LAGr_Matrix_new(Index2id_handle, GrB_BOOL, nids, id_max_dimension);
            LAGr_Matrix_build(*Index2id_handle, indices, ids, true_values, nids, GrB_SECOND_UINT64);
        }
        // build matrix Id2index(original_id, index) = 1
        if (Id2index_handle) {
            LAGr_Matrix_new(Id2index_handle, GrB_BOOL, id_max_dimension, nids);
            LAGr_Matrix_build(*Id2index_handle, ids, indices, true_values, nids, GrB_SECOND_UINT64);
        }
    }

    LAGRAPH_FREE(indices);
    LAGRAPH_FREE(true_values);
    return GrB_SUCCESS;
}
|
GB_bitmap_assign_C_template.c | //------------------------------------------------------------------------------
// GB_bitmap_assign_C_template: iterate over a bitmap matrix C
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// The #include'ing file defines a GB_CIJ_WORK macro for the body of the loop,
// which operates on the entry C(iC,jC) at position Cx [pC] and Cb [pC]. The C
// matrix held in bitmap form. If the mask matrix is also a bitmap matrix or
// full matrix, the GB_GET_MIJ macro can compute the effective value of the
// mask for the C(iC,jC) entry.
// C must be bitmap or full. If M is accessed, it must also be bitmap or full.
// Default for GB_GET_MIJ: if the #include'ing file has not defined it, the
// mask is not consulted and the macro expands to an empty statement.
#ifndef GB_GET_MIJ
#define GB_GET_MIJ(mij,pM) ;
#endif

{
    switch (assign_kind)
    {

        //----------------------------------------------------------------------
        // row assignment: C<M'>(iC,:), M is a column vector
        //----------------------------------------------------------------------

        case GB_ROW_ASSIGN :
        {
            // iterate over all of C(iC,:)
            const int64_t iC = I [0] ;
            const int nthreads = GB_nthreads (cvdim, chunk, nthreads_max) ;
            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                // each task handles a contiguous range of columns jC of C
                int64_t jC_start, jC_end, task_cnvals = 0 ;
                GB_PARTITION (jC_start, jC_end, cvdim, tid, nthreads) ;
                for (int64_t jC = jC_start ; jC < jC_end ; jC++)
                {
                    // C(iC,jC) is at position iC + jC*cvlen in Cx and Cb
                    int64_t pC = iC + jC * cvlen ;
                    GB_GET_MIJ (mij, jC) ;          // mij = Mask (jC)
                    GB_CIJ_WORK (pC) ;              // operate on C(iC,jC)
                }
                cnvals += task_cnvals ;
            }
        }
        break ;

        //----------------------------------------------------------------------
        // column assignment: C<M>(:,jC), M is a column vector
        //----------------------------------------------------------------------

        case GB_COL_ASSIGN :
        {
            // iterate over all of C(:,jC)
            const int64_t jC = J [0] ;
            const int64_t pC0 = jC * cvlen ;
            const int nthreads = GB_nthreads (cvlen, chunk, nthreads_max) ;
            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                // each task handles a contiguous range of rows iC of C(:,jC)
                int64_t iC_start, iC_end, task_cnvals = 0 ;
                GB_PARTITION (iC_start, iC_end, cvlen, tid, nthreads) ;
                for (int64_t iC = iC_start ; iC < iC_end ; iC++)
                {
                    int64_t pC = iC + pC0 ;
                    GB_GET_MIJ (mij, iC) ;          // mij = Mask (iC)
                    GB_CIJ_WORK (pC) ;              // operate on C(iC,jC)
                }
                cnvals += task_cnvals ;
            }
        }
        break ;

        //----------------------------------------------------------------------
        // GrB_assign: C<M>(I,J), M is a matrix the same size as C
        //----------------------------------------------------------------------

#ifndef GB_NO_ASSIGN_CASE
        case GB_ASSIGN :
        {
            // iterate over all of C(:,:).
            #include "GB_bitmap_assign_C_whole_template.c"
        }
        break ;
#endif

        //----------------------------------------------------------------------
        // GxB_subassign: C(I,J)<M>, M is a matrix the same size as C(I,J)
        //----------------------------------------------------------------------

#ifndef GB_NO_SUBASSIGN_CASE
        case GB_SUBASSIGN :
        {
            // iterate over all of C(I,J).  The mask is the same size as
            // C(I,J), so it is addressed with pA (position within the
            // submatrix), while C itself is addressed with pC.
            #undef GB_IXJ_WORK
            #define GB_IXJ_WORK(pC,pA)                                      \
            {                                                               \
                GB_GET_MIJ (mij, pA) ;          /* mij = Mask (pA) */       \
                GB_CIJ_WORK (pC) ;              /* operate on C(iC,jC) */   \
            }
            #include "GB_bitmap_assign_IxJ_template.c"
        }
        break ;
#endif

        default: ;
    }
}

#undef GB_NO_ASSIGN_CASE
#undef GB_NO_SUBASSIGN_CASE
|
DRB014-outofbounds-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The outmost loop is parallelized.
But the inner level loop has out of bound access for b[i][j] when j equals to 0.
This will case memory access of a previous row's last element.
For example, an array of 4x4:
j=0 1 2 3
i=0 x x x x
1 x x x x
2 x x x x
3 x x x x
outer loop: i=2,
inner loop: j=0
array element accessed b[i][j-1] becomes b[2][-1], which in turn is b[1][3]
due to linearized row-major storage of the 2-D array.
This causes loop-carried data dependence between i=2 and i=1.
Data race pair: b[i][j]@75 vs. b[i][j-1]@75.
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc,char *argv[])
{
    int i;
    int j;
    int n = 100;        // number of rows
    int m = 100;        // number of columns
    // VLA on the stack; note the contents are never initialized, so the
    // values read below (including row 0) are indeterminate.
    double b[n][m];
    // The outer (row) loop runs serially; the inner (column) loop is
    // parallelized.  Each inner iteration writes b[i][j] and reads
    // b[i-1][j], the row produced by the previous serial outer iteration.
    // NOTE(review): the file header describes a b[i][j-1] out-of-bounds
    // access (the documented data-race pair), which does not match this
    // loop body -- confirm which variant of the benchmark this is.
    for (i = 1; i <= n - 1; i += 1) {
        #pragma omp parallel for private (j)
        for (j = 0; j <= m - 1; j += 1) {
            b[i][j] = b[i - 1][j];
        }
    }
    printf("b[50][50]=%f\n",b[50][50]);
    return 0;
}
|
queue.h | // -*- C++ -*-
// Copyright (C) 2007-2016 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/queue.h
* @brief Lock-free double-ended queue.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_QUEUE_H
#define _GLIBCXX_PARALLEL_QUEUE_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#include <parallel/compatibility.h>
/** @brief Decide whether to declare certain variable volatile in this file. */
#define _GLIBCXX_VOLATILE volatile
namespace __gnu_parallel
{
  /**@brief Double-ended queue of bounded size, allowing lock-free
   * atomic access. push_front() and pop_front() must not be called
   * concurrently to each other, while pop_back() can be called
   * concurrently at all times.
   * @c empty(), @c size(), and @c top() are intentionally not provided.
   * Calling them would not make sense in a concurrent setting.
   * @param _Tp Contained element type. */
  template<typename _Tp>
    class _RestrictedBoundedConcurrentQueue
    {
    private:
      /** @brief Array of elements, seen as cyclic buffer. */
      _Tp* _M_base;

      /** @brief Maximal number of elements contained at the same time. */
      _SequenceIndex _M_max_size;

      /** @brief Cyclic __begin and __end pointers contained in one
       *  atomically changeable value.  Both indices are packed into a
       *  single word by __encode2() so that they can be read and updated
       *  together with one atomic operation. */
      _GLIBCXX_VOLATILE _CASable _M_borders;

    public:
      /** @brief Constructor. Not to be called concurrent, of course.
       *  @param __max_size Maximal number of elements to be contained. */
      _RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size)
      {
        _M_max_size = __max_size;
        _M_base = new _Tp[__max_size];
        _M_borders = __encode2(0, 0);
        // Publish the freshly initialized members to other threads.
#pragma omp flush
      }

      /** @brief Destructor. Not to be called concurrent, of course. */
      ~_RestrictedBoundedConcurrentQueue()
      { delete[] _M_base; }

      /** @brief Pushes one element into the queue at the front end.
       *  Must not be called concurrently with pop_front(). */
      void
      push_front(const _Tp& __t)
      {
        _CASable __former_borders = _M_borders;
        int __former_front, __former_back;
        __decode2(__former_borders, __former_front, __former_back);
        // Store the element first; it only becomes visible once the
        // front index is advanced below.
        *(_M_base + __former_front % _M_max_size) = __t;
#if _GLIBCXX_ASSERTIONS
        // Otherwise: front - back > _M_max_size eventually.
        _GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back)
                                 <= _M_max_size);
#endif
        // Atomically advance the front index by one.
        __fetch_and_add(&_M_borders, __encode2(1, 0));
      }

      /** @brief Pops one element from the queue at the front end.
       *  Must not be called concurrently with push_front(). */
      bool
      pop_front(_Tp& __t)
      {
        int __former_front, __former_back;
#pragma omp flush
        __decode2(_M_borders, __former_front, __former_back);
        while (__former_front > __former_back)
          {
            // Chance.
            _CASable __former_borders = __encode2(__former_front,
                                                  __former_back);
            _CASable __new_borders = __encode2(__former_front - 1,
                                               __former_back);
            // Claim the front element by moving the front index back one;
            // a successful CAS means no concurrent pop_back() took it.
            if (__compare_and_swap(&_M_borders, __former_borders,
                                   __new_borders))
              {
                __t = *(_M_base + (__former_front - 1) % _M_max_size);
                return true;
              }
            // CAS lost: re-read the borders and retry while non-empty.
#pragma omp flush
            __decode2(_M_borders, __former_front, __former_back);
          }
        return false;
      }

      /** @brief Pops one element from the queue at the back end.
       *  May be called concurrently with push_front()/pop_front()
       *  (see the class comment above).
       *  (The original comment said "front end" / "pop_front()" here,
       *  a copy-paste slip.) */
      bool
      pop_back(_Tp& __t)        //queue behavior
      {
        int __former_front, __former_back;
#pragma omp flush
        __decode2(_M_borders, __former_front, __former_back);
        while (__former_front > __former_back)
          {
            // Chance.
            _CASable __former_borders = __encode2(__former_front,
                                                  __former_back);
            _CASable __new_borders = __encode2(__former_front,
                                               __former_back + 1);
            // Claim the back element by advancing the back index by one.
            if (__compare_and_swap(&_M_borders, __former_borders,
                                   __new_borders))
              {
                __t = *(_M_base + __former_back % _M_max_size);
                return true;
              }
            // CAS lost: re-read the borders and retry while non-empty.
#pragma omp flush
            __decode2(_M_borders, __former_front, __former_back);
          }
        return false;
      }
  };
} //namespace __gnu_parallel
#undef _GLIBCXX_VOLATILE
#endif /* _GLIBCXX_PARALLEL_QUEUE_H */
|
paralelo.c |
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#include "mpi.h"
#define MASTER 0
typedef struct
{
unsigned char r;
unsigned char g;
unsigned char b;
}rgb;
typedef struct
{
int columns;
int size;
int flag;
}info;
unsigned char avg (rgb **image, int x, int y, int cols, int rows, int flag);
/* Master/slave MPI smoothing filter for a PPM ("P6") image.
 *
 * Rank 0 (MASTER) reads rgb/in.ppm, splits the image into horizontal
 * strips (with halo rows), sends one strip to every other rank, collects
 * the smoothed strips back and writes out.ppm.  Every other rank receives
 * its strip, applies avg() to each pixel (rows processed in parallel with
 * OpenMP) and sends the result back. */
int main (int argc, char **argv){
    FILE *file;
    info imgInfo;
    int i, j,k,l, rows, columns, max, stripSize, extra, rc, size, rank;
    rgb **img, **newImg;
    double startTime, endTime;
    MPI_Status status;
    rc = MPI_Init(&argc, &argv);
    if(rc != MPI_SUCCESS){
        printf("Error!\n");
        return -1;
    }
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if(rank == MASTER ){
        /* read the PPM header (skip the 2-byte "P6" magic) */
        file = fopen("rgb/in.ppm", "rb");
        fseek(file, 2, SEEK_SET);
        fscanf(file, "%d", &columns);
        fscanf(file, "%d", &rows);
        /* allocate the matrix that holds the r,g,b components of the
         * input image */
        img = (rgb**) malloc (rows*sizeof(rgb*));
        /* allocate the matrix that holds the r,g,b components of the
         * output image */
        newImg = (rgb**) malloc (rows*sizeof(rgb*));
        for(i = 0; i < rows; i++){
            img[i] = (rgb*)malloc(columns*sizeof(rgb));
            newImg[i] = (rgb*)malloc(columns*sizeof(rgb));
        }
        fscanf(file,"%d\n",&max);
        /* read the image from the input file into the matrix */
        for(i = 0; i < rows; i++){
            for(j = 0; j < columns; j++){
                fread(&img[i][j].r,sizeof(unsigned char),1,file);
                fread(&img[i][j].g,sizeof(unsigned char),1,file);
                fread(&img[i][j].b,sizeof(unsigned char),1,file);
            }
        }
        fclose(file);
        /* rows per slave; the last slave also takes the remainder */
        stripSize = rows / (size - 1) ;
        extra = (rows % (size-1)) + stripSize + 2;
        j = 0;      /* j tracks the first row of the current strip */
        startTime = MPI_Wtime();
        imgInfo.columns = columns;
        for(i = 1; i < size; i++){
            if(i == 1){ /* send the first strip to the first slave */
                imgInfo.size = stripSize + 2;
                imgInfo.flag = 0;
                MPI_Send(&imgInfo, 3, MPI_INT, i, 40*i, MPI_COMM_WORLD);
                for(k = 0; k < imgInfo.size; k++){
                    MPI_Send(img[k], 3*imgInfo.columns, MPI_BYTE, i, 10*i, MPI_COMM_WORLD);
                }
            }
            else if(i > 1 && i < size - 1){ /* send the middle strips to the other slaves */
                imgInfo.size = stripSize + 4;
                imgInfo.flag = 1;
                MPI_Send(&imgInfo, 3, MPI_INT, i, 40*i, MPI_COMM_WORLD);
                /* NOTE(review): this loop sends imgInfo.size + 2 rows
                 * (k = j-2 .. j+imgInfo.size-1) while the slave receives
                 * only imgInfo.size rows -- compare with the flag==2
                 * branch below, which sends exactly imgInfo.size rows.
                 * Verify against the slave's receive loop. */
                for(k = j-2; k < j+imgInfo.size; k++){
                    MPI_Send(img[k], 3*imgInfo.columns, MPI_BYTE, i, 10*i, MPI_COMM_WORLD);
                }
            }
            else{ /* send the last strip to the last slave */
                imgInfo.size = extra;
                imgInfo.flag = 2;
                MPI_Send(&imgInfo, 3, MPI_INT, i, 40*i, MPI_COMM_WORLD);
                for(k = j-2; k < j+extra-2; k++){
                    MPI_Send(img[k], 3*imgInfo.columns, MPI_BYTE, i, 10*i, MPI_COMM_WORLD);
                }
            }
            j += stripSize;
        }
        /* receive the results after the nodes have processed them */
        j = 0;
        for(i = 1; i < size-1; i++){
            for(k = j; k < j+stripSize; k++){
                MPI_Recv(newImg[k], 3*imgInfo.columns, MPI_BYTE, i, i*10 + 1, MPI_COMM_WORLD, &status);
            }
            j += stripSize;
        }
        /* i == size-1 here: receive the (larger) strip of the last slave */
        for(k = j; k < j+extra-2; k++){
            MPI_Recv(newImg[k], 3*imgInfo.columns, MPI_BYTE, i, i*10 + 1, MPI_COMM_WORLD, &status);
        }
        endTime = MPI_Wtime();
        printf("%.6lf segundos\n", endTime - startTime);
        /* write the new image */
        file = fopen("out.ppm", "wb");
        fprintf(file, "P6\n");
        fprintf(file, "%d %d\n",columns,rows);
        fprintf(file, "%d\n",max);
        for(i = 0; i < rows; i++){
            for(j = 0; j < columns; j++){
                fwrite(&newImg[i][j].r ,sizeof(unsigned char),1,file);
                fwrite(&newImg[i][j].g ,sizeof(unsigned char),1,file);
                fwrite(&newImg[i][j].b ,sizeof(unsigned char),1,file);
            }
        }
        fclose(file);
        /* free the memory used */
        for(i = 0; i < rows; i++){
            free(img[i]);
            free(newImg[i]);
        }
        free(img);
        free(newImg);
    }
    else{
        /* slave: receive the strip descriptor from the master */
        MPI_Recv(&imgInfo, 3, MPI_INT, MASTER, 40*rank, MPI_COMM_WORLD, &status);
        /* allocate the matrix that holds the r,g,b components of the
         * input strip */
        img = (rgb**) malloc (imgInfo.size*sizeof(rgb*));
        /* allocate the matrix that holds the r,g,b components of the
         * output strip */
        newImg = (rgb**) malloc (imgInfo.size*sizeof(rgb*));
        for(i = 0; i < imgInfo.size; i++){
            img[i] = (rgb*)malloc(imgInfo.columns*sizeof(rgb));
            newImg[i] = (rgb*)malloc(imgInfo.columns*sizeof(rgb));
        }
        /* receive the strip data from the master node */
        for(i = 0; i < imgInfo.size; i++){
            MPI_Recv(img[i], 3*imgInfo.columns, MPI_BYTE, MASTER, rank*10, MPI_COMM_WORLD, &status);
        }
        /* apply the smooth effect to the strip; rows in parallel */
        #pragma omp parallel for firstprivate(j) lastprivate(j)
        for(i = 0; i < imgInfo.size; i++){
            for(j = 0; j < imgInfo.columns; j++){
                newImg[i][j].r = avg(img, j, i,imgInfo.columns,imgInfo.size,1);
                newImg[i][j].g = avg(img, j, i,imgInfo.columns,imgInfo.size,2);
                newImg[i][j].b = avg(img, j, i,imgInfo.columns,imgInfo.size,3);
            }
        }
        /* send the result back to the master node, skipping the halo
         * rows (which rows are skipped depends on the strip position) */
        if(imgInfo.flag == 0){
            for(i = 0; i < imgInfo.size-2; i++){
                MPI_Send(newImg[i], 3*imgInfo.columns, MPI_BYTE, MASTER , rank*10 + 1, MPI_COMM_WORLD);
            }
        }
        else if(imgInfo.flag == 1){
            for(i = 2; i < imgInfo.size-2; i++){
                MPI_Send(newImg[i], 3*imgInfo.columns, MPI_BYTE, MASTER , rank*10 + 1, MPI_COMM_WORLD);
            }
        }
        else{
            for(i = 2; i < imgInfo.size; i++){
                MPI_Send(newImg[i], 3*imgInfo.columns, MPI_BYTE, MASTER , rank*10 + 1, MPI_COMM_WORLD);
            }
        }
        /* free memory */
        for(i = 0; i < imgInfo.size; i++){
            free(img[i]);
            free(newImg[i]);
        }
        free(img);
        free(newImg);
    }
    MPI_Finalize();
    return 0;
}
/* Returns the average of one colour component of the pixels in the 5x5
 * window centred on (x, y), clipping the window at the image borders.
 *
 * image - strip of pixels, rows x cols
 * x, y  - column and row of the centre pixel (must be inside the image)
 * flag  - component selector: 1 = red, 2 = green, 3 = blue
 *
 * BUG FIX: the loops previously used "< y+2" / "< x+2", which scans only
 * a 4x4, off-centre window even though a 5x5 window is intended (as the
 * original comment stated).  Use <= so the window is 5x5 and centred. */
unsigned char avg (rgb **image, int x, int y, int cols, int rows, int flag){
    int i, j;
    unsigned char avg;
    int sum = 0, count = 0;
    for(i = y-2; i <= y+2; i++){
        for(j = x-2; j <= x+2; j++){
            /* skip window positions that fall outside the image */
            if(i < 0 || i > rows-1 || j < 0 || j > cols-1)
                continue;
            if (flag == 1)
                sum += image[i][j].r;
            else if (flag == 2)
                sum += image[i][j].g;
            else if (flag == 3)
                sum += image[i][j].b;
            count++;
        }
    }
    /* (x,y) itself is always inside the image, so count >= 1 */
    avg = sum/count;
    return avg;
}
|
deconvolution_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution) on packn-packed fp32 data using
// RISC-V Vector intrinsics.  For every output pixel, gathers the input
// samples that contribute to it through the (dilated, strided) kernel and
// accumulates one packn-wide vector per output channel.
static void deconvolution_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packn, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of fp32 lanes per vector register (VLEN bytes / 4)
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // effective kernel footprint after dilation
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
    const int maxk = kernel_w * kernel_h;
    const float* bias_data_ptr = bias_data;
    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // start the accumulator at zero, or at this output
                // channel's bias when a bias is provided
                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
                if (bias_data_ptr)
                {
                    _sum = vle32_v_f32m1(bias_data_ptr + p * packn, vl);
                }
                const float* kptr = (const float*)weight_data_packn.channel(p);
                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    for (int y = 0; y < kernel_h; y++)
                    {
                        // map output row i back to the input row that
                        // contributes through kernel tap y; skip taps
                        // that do not land on a stride-aligned sample
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;
                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;
                        for (int x = 0; x < kernel_w; x++)
                        {
                            // same mapping in the horizontal direction
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;
                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;
                            const float* sptr = m.row(sy) + sx * packn;
                            int k = y * kernel_w + x;
                            // accumulate: scalar input lane l times a
                            // packn-wide weight vector for kernel tap k
                            for (int l = 0; l < packn; l++)
                            {
                                float val = *sptr++;
                                vfloat32m1_t _w0 = vle32_v_f32m1(kptr + k * packn * packn + packn * l, vl);
                                _sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
                            }
                        }
                    }
                    // advance to the weights of the next input channel
                    kptr += maxk * packn * packn;
                }
                _sum = activation_ps(_sum, activation_type, activation_params, vl);
                vse32_v_f32m1(outptr + j * packn, _sum, vl);
            }
            outptr += outw * packn;
        }
    }
}
|
GB_is_diagonal.c | //------------------------------------------------------------------------------
// GB_is_diagonal: check if A is a diagonal matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Returns true if A is a square diagonal matrix, with all diagonal entries
// present. All pending tuples are ignored. Zombies are treated as entries.
#include "GB_mxm.h"
#include "GB_atomics.h"
bool GB_is_diagonal             // true if A is diagonal
(
    const GrB_Matrix A,         // input matrix to examine
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (A != NULL) ;
    ASSERT_MATRIX_OK (A, "A check diag", GB0) ;

    //--------------------------------------------------------------------------
    // trivial cases
    //--------------------------------------------------------------------------

    int64_t n = GB_NROWS (A) ;
    int64_t ncols = GB_NCOLS (A) ;
    if (n != ncols)
    {
        // A is rectangular
        return (false) ;
    }

    int64_t anz = GB_NNZ (A) ;
    int64_t nvec = A->nvec ;
    if (n != anz || n != nvec)
    {
        // A must have exactly n entries in n vectors.  A can be sparse or
        // hypersparse.  If hypersparse, all vectors must be present, so
        // Ap has size n+1 whether sparse or hypersparse.
        return (false) ;
    }

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    // Break the work into lots of tasks so the early-exit can be exploited.
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
    int ntasks = (nthreads == 1) ? 1 : (256 * nthreads) ;
    ntasks = GB_IMIN (ntasks, n) ;
    ntasks = GB_IMAX (ntasks, 1) ;

    //--------------------------------------------------------------------------
    // examine each vector of A
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ai = A->i ;

    // shared early-exit flag; written only under atomic/critical below
    int diagonal = true ;

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // check for early exit
        //----------------------------------------------------------------------

        int diag = true ;
        {
            #if GB_MICROSOFT
            #pragma omp critical (GB_is_diagonal)
            diag = diagonal ;
            #else
            GB_ATOMIC_READ
            diag = diagonal ;
            #endif
        }
        if (!diag) continue ;

        //----------------------------------------------------------------------
        // check if vectors jstart:jend-1 are diagonal
        //----------------------------------------------------------------------

        int64_t jstart, jend ;
        GB_PARTITION (jstart, jend, n, tid, ntasks) ;
        for (int64_t j = jstart ; diag && j < jend ; j++)
        {
            int64_t p = Ap [j] ;
            int64_t ajnz = Ap [j+1] - p ;
            if (ajnz != 1)
            {
                // A(:,j) must have exactly one entry.
                // BUG FIX: do not read Ai [p] in this case.  If A(:,j) is
                // empty and j is the last vector, p == anz and Ai [p]
                // would be an out-of-bounds read.
                diag = false ;
            }
            else if (Ai [p] != j)
            {
                // the single entry must be A(j,j)
                diag = false ;
            }
        }

        //----------------------------------------------------------------------
        // early exit: tell all other tasks to halt
        //----------------------------------------------------------------------

        if (!diag)
        {
            #if GB_MICROSOFT
            #pragma omp critical (GB_is_diagonal)
            diagonal = false ;
            #else
            GB_ATOMIC_WRITE
            diagonal = false ;
            #endif
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // a diagonal matrix has an entry in every vector
    if (diagonal) A->nvec_nonempty = n ;
    return ((bool) diagonal) ;
}
|
barrier.c |
// OpenMP Barrier Example
// Inclusions
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// Main
// OpenMP barrier demonstration: times two parallel for-loops separated by
// explicit barriers and reports the elapsed time of each.
//
// argv[1] (optional): number of loop iterations; with no argument the
// behaviour is unchanged (n stays 0 and the loops run zero iterations).
int main( int argc, char** argv ) {
    int i = 0;              // Loop Iterator
    int n = 0;              // Number of Iterations
    double start = 0.0;     // Start Time
    double middle = 0.0;    // Middle Time
    double end = 0.0;       // End Time
    double for1 = 0.0;      // For Loop 1 Time
    double for2 = 0.0;      // For Loop 2 Time
    double total = 0.0;     // Total Time
    // Allow the iteration count to be supplied on the command line
    // (backward compatible: no argument keeps the old n = 0).
    if ( argc > 1 ) {
        n = atoi( argv[1] );
    }
    // Parallel Region Start
    #pragma omp parallel \
        shared( n ) \
        private( i )
    {
        // BUG FIX: start/middle/end were previously written by every
        // thread at once, which is a data race.  'single' lets exactly
        // one thread record each timestamp, and its implied barrier
        // makes the value visible to all threads.
        #pragma omp single
        start = omp_get_wtime( );
        #pragma omp for nowait  // Parallelize For Loop - Don't Wait for All to End
        for( i = 0; i < n; i++ ) {
            printf( "Thread %d of %d - Iteration %d\n",
                    omp_get_thread_num( ),
                    omp_get_max_threads( ), i );
        }
        #pragma omp barrier     // Wait Here for All Threads to Finish Loop 1
        #pragma omp single
        middle = omp_get_wtime( );
        #pragma omp for nowait  // Parallelize For Loop - Don't Wait for All to End
        for( i = 0; i < n; i++ ) {
            printf( "Thread %d of %d - Iteration %d\n",
                    omp_get_thread_num( ),
                    omp_get_max_threads( ), i );
        }
        #pragma omp barrier     // Wait Here for All Threads to Finish Loop 2
        #pragma omp single
        end = omp_get_wtime( );
    }
    // Calculate Time
    for1 = middle - start;
    for2 = end - middle;
    total = end - start;
    // Display Time
    printf( "For Loop 1: %0.9lf\n", for1 );
    printf( "For Loop 2: %0.9lf\n", for2 );
    printf( "Total Time: %0.9lf\n", total );
    return 0;
}
// End barrier.c - EWG SDG
|
convolution_sgemm_pack4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack4_fp16sa_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const __fp16* bias = _bias;
// permute
Mat tmp;
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 8u, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 8u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator);
{
int nn_size = size / 8;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
__fp16* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x4
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n"
"st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += size * 4;
}
}
}
}
int remain_outch_start = 0;
int nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
__fp16* outptr0 = top_blob.channel(p);
__fp16* outptr1 = top_blob.channel(p + 1);
const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const __fp16* biasptr = bias ? bias + p * 4 : zeros;
float16x8_t _bias0 = vld1q_f16(biasptr);
int i = 0;
for (; i + 7 < size; i += 8)
{
const __fp16* tmpptr = tmp.channel(i / 8);
const __fp16* kptr = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"mov v24.16b, %10.16b \n"
"mov v25.16b, %10.16b \n"
"mov v26.16b, %10.16b \n"
"mov v27.16b, %10.16b \n"
"mov v28.16b, %10.16b \n"
"mov v29.16b, %10.16b \n"
"mov v30.16b, %10.16b \n"
"mov v31.16b, %10.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123
"fmla v24.8h, v4.8h, v0.h[0] \n"
"fmla v25.8h, v4.8h, v0.h[1] \n"
"fmla v26.8h, v4.8h, v0.h[2] \n"
"fmla v27.8h, v4.8h, v0.h[3] \n"
"fmla v28.8h, v4.8h, v0.h[4] \n"
"fmla v29.8h, v4.8h, v0.h[5] \n"
"fmla v30.8h, v4.8h, v0.h[6] \n"
"fmla v31.8h, v4.8h, v0.h[7] \n"
"fmla v24.8h, v5.8h, v1.h[0] \n"
"fmla v25.8h, v5.8h, v1.h[1] \n"
"fmla v26.8h, v5.8h, v1.h[2] \n"
"fmla v27.8h, v5.8h, v1.h[3] \n"
"fmla v28.8h, v5.8h, v1.h[4] \n"
"fmla v29.8h, v5.8h, v1.h[5] \n"
"fmla v30.8h, v5.8h, v1.h[6] \n"
"fmla v31.8h, v5.8h, v1.h[7] \n"
"fmla v24.8h, v6.8h, v2.h[0] \n"
"fmla v25.8h, v6.8h, v2.h[1] \n"
"fmla v26.8h, v6.8h, v2.h[2] \n"
"fmla v27.8h, v6.8h, v2.h[3] \n"
"fmla v28.8h, v6.8h, v2.h[4] \n"
"fmla v29.8h, v6.8h, v2.h[5] \n"
"fmla v30.8h, v6.8h, v2.h[6] \n"
"fmla v31.8h, v6.8h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v7.8h, v3.h[0] \n"
"fmla v25.8h, v7.8h, v3.h[1] \n"
"fmla v26.8h, v7.8h, v3.h[2] \n"
"fmla v27.8h, v7.8h, v3.h[3] \n"
"fmla v28.8h, v7.8h, v3.h[4] \n"
"fmla v29.8h, v7.8h, v3.h[5] \n"
"fmla v30.8h, v7.8h, v3.h[6] \n"
"fmla v31.8h, v7.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"ext v28.16b, v28.16b, v28.16b, #8 \n"
"ext v29.16b, v29.16b, v29.16b, #8 \n"
"ext v30.16b, v30.16b, v30.16b, #8 \n"
"ext v31.16b, v31.16b, v31.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr),
"w"(_bias0) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"mov v24.16b, %10.16b \n"
"mov v25.16b, %10.16b \n"
"mov v26.16b, %10.16b \n"
"mov v27.16b, %10.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123
"fmla v24.8h, v4.8h, v0.h[0] \n"
"fmla v25.8h, v4.8h, v0.h[1] \n"
"fmla v26.8h, v4.8h, v0.h[2] \n"
"fmla v27.8h, v4.8h, v0.h[3] \n"
"fmla v24.8h, v5.8h, v1.h[0] \n"
"fmla v25.8h, v5.8h, v1.h[1] \n"
"fmla v26.8h, v5.8h, v1.h[2] \n"
"fmla v27.8h, v5.8h, v1.h[3] \n"
"fmla v24.8h, v6.8h, v2.h[0] \n"
"fmla v25.8h, v6.8h, v2.h[1] \n"
"fmla v26.8h, v6.8h, v2.h[2] \n"
"fmla v27.8h, v6.8h, v2.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v7.8h, v3.h[0] \n"
"fmla v25.8h, v7.8h, v3.h[1] \n"
"fmla v26.8h, v7.8h, v3.h[2] \n"
"fmla v27.8h, v7.8h, v3.h[3] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr),
"w"(_bias0) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27");
}
for (; i < size; i++)
{
const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
float16x8_t _sum0 = _bias0;
for (int q = 0; q < nn; q++)
{
float16x4_t _r0 = vld1_f16(tmpptr);
float16x8_t _k0 = vld1q_f16(kptr);
float16x8_t _k1 = vld1q_f16(kptr + 8);
float16x8_t _k2 = vld1q_f16(kptr + 16);
float16x8_t _k3 = vld1q_f16(kptr + 24);
_sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0);
_sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1);
_sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2);
_sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3);
kptr += 32;
tmpptr += 4;
}
vst1_f16(outptr0, vget_low_f16(_sum0));
vst1_f16(outptr1, vget_high_f16(_sum0));
outptr0 += 4;
outptr1 += 4;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
__fp16* outptr0 = top_blob.channel(p);
const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f};
const __fp16* biasptr = bias ? bias + p * 4 : zeros;
float16x4_t _bias0 = vld1_f16(biasptr);
int i = 0;
for (; i + 7 < size; i += 8)
{
const __fp16* tmpptr = tmp.channel(i / 8);
const __fp16* kptr = kernel.channel(p / 2 + p % 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"mov v24.16b, %8.16b \n"
"mov v25.16b, %8.16b \n"
"mov v26.16b, %8.16b \n"
"mov v27.16b, %8.16b \n"
"mov v28.16b, %8.16b \n"
"mov v29.16b, %8.16b \n"
"mov v30.16b, %8.16b \n"
"mov v31.16b, %8.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123
"fmla v24.4h, v4.4h, v0.h[0] \n"
"fmla v25.4h, v4.4h, v0.h[1] \n"
"fmla v26.4h, v4.4h, v0.h[2] \n"
"fmla v27.4h, v4.4h, v0.h[3] \n"
"fmla v28.4h, v4.4h, v0.h[4] \n"
"fmla v29.4h, v4.4h, v0.h[5] \n"
"fmla v30.4h, v4.4h, v0.h[6] \n"
"fmla v31.4h, v4.4h, v0.h[7] \n"
"fmla v24.4h, v5.4h, v1.h[0] \n"
"fmla v25.4h, v5.4h, v1.h[1] \n"
"fmla v26.4h, v5.4h, v1.h[2] \n"
"fmla v27.4h, v5.4h, v1.h[3] \n"
"fmla v28.4h, v5.4h, v1.h[4] \n"
"fmla v29.4h, v5.4h, v1.h[5] \n"
"fmla v30.4h, v5.4h, v1.h[6] \n"
"fmla v31.4h, v5.4h, v1.h[7] \n"
"fmla v24.4h, v6.4h, v2.h[0] \n"
"fmla v25.4h, v6.4h, v2.h[1] \n"
"fmla v26.4h, v6.4h, v2.h[2] \n"
"fmla v27.4h, v6.4h, v2.h[3] \n"
"fmla v28.4h, v6.4h, v2.h[4] \n"
"fmla v29.4h, v6.4h, v2.h[5] \n"
"fmla v30.4h, v6.4h, v2.h[6] \n"
"fmla v31.4h, v6.4h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v7.4h, v3.h[0] \n"
"fmla v25.4h, v7.4h, v3.h[1] \n"
"fmla v26.4h, v7.4h, v3.h[2] \n"
"fmla v27.4h, v7.4h, v3.h[3] \n"
"fmla v28.4h, v7.4h, v3.h[4] \n"
"fmla v29.4h, v7.4h, v3.h[5] \n"
"fmla v30.4h, v7.4h, v3.h[6] \n"
"fmla v31.4h, v7.4h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"w"(_bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel.channel(p / 2 + p % 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"mov v24.16b, %8.16b \n"
"mov v25.16b, %8.16b \n"
"mov v26.16b, %8.16b \n"
"mov v27.16b, %8.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123
"fmla v24.4h, v4.4h, v0.h[0] \n"
"fmla v25.4h, v4.4h, v0.h[1] \n"
"fmla v26.4h, v4.4h, v0.h[2] \n"
"fmla v27.4h, v4.4h, v0.h[3] \n"
"fmla v24.4h, v5.4h, v1.h[0] \n"
"fmla v25.4h, v5.4h, v1.h[1] \n"
"fmla v26.4h, v5.4h, v1.h[2] \n"
"fmla v27.4h, v5.4h, v1.h[3] \n"
"fmla v24.4h, v6.4h, v2.h[0] \n"
"fmla v25.4h, v6.4h, v2.h[1] \n"
"fmla v26.4h, v6.4h, v2.h[2] \n"
"fmla v27.4h, v6.4h, v2.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v7.4h, v3.h[0] \n"
"fmla v25.4h, v7.4h, v3.h[1] \n"
"fmla v26.4h, v7.4h, v3.h[2] \n"
"fmla v27.4h, v7.4h, v3.h[3] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"w"(_bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27");
}
for (; i < size; i++)
{
const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel.channel(p / 2 + p % 2);
int nn = inch * maxk; // inch always > 0
float16x4_t _sum0 = _bias0;
for (int q = 0; q < nn; q++)
{
float16x4_t _r0 = vld1_f16(tmpptr);
float16x4_t _k0 = vld1_f16(kptr);
float16x4_t _k1 = vld1_f16(kptr + 4);
float16x4_t _k2 = vld1_f16(kptr + 8);
float16x4_t _k3 = vld1_f16(kptr + 12);
_sum0 = vfma_lane_f16(_sum0, _k0, _r0, 0);
_sum0 = vfma_lane_f16(_sum0, _k1, _r0, 1);
_sum0 = vfma_lane_f16(_sum0, _k2, _r0, 2);
_sum0 = vfma_lane_f16(_sum0, _k3, _r0, 3);
kptr += 16;
tmpptr += 4;
}
vst1_f16(outptr0, _sum0);
outptr0 += 4;
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack4_fp16sa_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 4b-4a-maxk-inch/4a-outch/4b
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(32 * maxk, inch / 4, outch / 8 + (outch % 8) / 4, (size_t)2u);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
const Mat k4 = kernel.channel(q + 4);
const Mat k5 = kernel.channel(q + 5);
const Mat k6 = kernel.channel(q + 6);
const Mat k7 = kernel.channel(q + 7);
__fp16* g00 = kernel_tm.channel(q / 8);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
for (int k = 0; k < maxk; k++)
{
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00[8] = (__fp16)k01[k];
g00[9] = (__fp16)k11[k];
g00[10] = (__fp16)k21[k];
g00[11] = (__fp16)k31[k];
g00[12] = (__fp16)k41[k];
g00[13] = (__fp16)k51[k];
g00[14] = (__fp16)k61[k];
g00[15] = (__fp16)k71[k];
g00[16] = (__fp16)k02[k];
g00[17] = (__fp16)k12[k];
g00[18] = (__fp16)k22[k];
g00[19] = (__fp16)k32[k];
g00[20] = (__fp16)k42[k];
g00[21] = (__fp16)k52[k];
g00[22] = (__fp16)k62[k];
g00[23] = (__fp16)k72[k];
g00[24] = (__fp16)k03[k];
g00[25] = (__fp16)k13[k];
g00[26] = (__fp16)k23[k];
g00[27] = (__fp16)k33[k];
g00[28] = (__fp16)k43[k];
g00[29] = (__fp16)k53[k];
g00[30] = (__fp16)k63[k];
g00[31] = (__fp16)k73[k];
g00 += 32;
}
}
}
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
__fp16* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
for (int k = 0; k < maxk; k++)
{
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k01[k];
g00[5] = (__fp16)k11[k];
g00[6] = (__fp16)k21[k];
g00[7] = (__fp16)k31[k];
g00[8] = (__fp16)k02[k];
g00[9] = (__fp16)k12[k];
g00[10] = (__fp16)k22[k];
g00[11] = (__fp16)k32[k];
g00[12] = (__fp16)k03[k];
g00[13] = (__fp16)k13[k];
g00[14] = (__fp16)k23[k];
g00[15] = (__fp16)k33[k];
g00 += 16;
}
}
}
}
// im2col + packed sgemm convolution for pack4 fp16 storage.
// Unrolls each kernel_w x kernel_h window into rows of bottom_im2col,
// then delegates the matrix multiply to im2col_sgemm_pack4_fp16sa_neon.
static void convolution_im2col_sgemm_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;
    // im2col: one channel per input channel, one row per kernel tap,
    // `size` pack4 elements per row (elemsize 8u = 4 x fp16)
    Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
    {
        // gap skips from the end of one output row to the start of the next
        // source row: a full stride_h image row minus the columns consumed
        // (in fp16 units; x4 for pack4)
        const int gap = (w * stride_h - outw * stride_w) * 4;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            __fp16* ptr = bottom_im2col.channel(p);
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // start at the (u,v) tap offset within the dilated window
                    const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * 4;
                    for (int i = 0; i < outh; i++)
                    {
                        // copy pack4 samples for this output row, 4/2/1 at a time
                        int j = 0;
                        for (; j + 3 < outw; j += 4)
                        {
                            float16x4_t _val0 = vld1_f16(sptr);
                            float16x4_t _val1 = vld1_f16(sptr + stride_w * 4);
                            float16x4_t _val2 = vld1_f16(sptr + stride_w * 8);
                            float16x4_t _val3 = vld1_f16(sptr + stride_w * 12);
                            vst1_f16(ptr, _val0);
                            vst1_f16(ptr + 4, _val1);
                            vst1_f16(ptr + 8, _val2);
                            vst1_f16(ptr + 12, _val3);
                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            float16x4_t _val0 = vld1_f16(sptr);
                            float16x4_t _val1 = vld1_f16(sptr + stride_w * 4);
                            vst1_f16(ptr, _val0);
                            vst1_f16(ptr + 4, _val1);
                            sptr += stride_w * 8;
                            ptr += 8;
                        }
                        for (; j < outw; j++)
                        {
                            float16x4_t _val = vld1_f16(sptr);
                            vst1_f16(ptr, _val);
                            sptr += stride_w * 4;
                            ptr += 4;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }
    im2col_sgemm_pack4_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% so that its gamma is set appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    gamma,
    log_mean,
    mean,
    sans;

  MagickStatusType
    status;

  ssize_t
    i;

  /*
    Choose gamma so the mean intensity maps to mid-gray:
    gamma = log(mean/QuantumRange)/log(0.5).
  */
  log_mean=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.  Bail out
        if the mean cannot be computed; `mean' would otherwise be used
        uninitialized below.
      */
      if (GetImageMean(image,&mean,&sans,exception) == MagickFalse)
        return(MagickFalse);
      gamma=log(mean*QuantumScale)/log_mean;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Auto-gamma each channel separately.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    /*
      Restrict updates to this single channel while leveling it.
    */
    channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
    status=GetImageMean(image,&mean,&sans,exception);
    if (status != MagickFalse)
      {
        /*
          Only derive and apply a gamma when the channel mean is valid.
        */
        gamma=log(mean*QuantumScale)/log_mean;
        status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
      }
    (void) SetImageChannelMask(image,channel_mask);  /* always restore mask */
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  /*
    Delegate to MinMaxStretchImage with arguments (black point 0.0,
    white point 0.0, gamma 1.0) so the channel extremes are stretched
    to the full quantum range without clipping or gamma adjustment.
  */
  return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    coefficients[2],
    intercept,
    slope;

  /*
    Convert the brightness/contrast percentages into a linear map
    slope*x+intercept and apply it as a degree-1 polynomial.
    NOTE(review): the tag macro above carries a historic spelling and is
    kept byte-for-byte for compatibility.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Contrast maps onto a slope through tan(): 0% gives slope 1.0
    (identity); negative slopes are clamped to zero.
  */
  slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  return(FunctionImage(image,PolynomialFunction,2,coefficients,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L A H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLAHEImage() is a variant of adaptive histogram equalization in which the
% contrast amplification is limited, so as to reduce this problem of noise
% amplification.
%
% Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in
% "Graphics Gems IV", Academic Press, 1994.
%
% The format of the CLAHEImage method is:
%
% MagickBooleanType CLAHEImage(Image *image,const size_t width,
% const size_t height,const size_t number_bins,const double clip_limit,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the tile divisions to use in horizontal direction.
%
% o height: the height of the tile divisions to use in vertical direction.
%
% o number_bins: number of bins for histogram ("dynamic range").
%
% o clip_limit: contrast limit for localised changes in contrast. A limit
% less than 1 results in standard non-contrast limited AHE.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Closed intensity interval [min,max]; used to bound the 16-bit gray
  range the CLAHE histograms and lookup table operate over.
*/
typedef struct _RangeInfo
{
  unsigned short
    min,
    max;
} RangeInfo;
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
  size_t *histogram)
{
#define NumberCLAHEGrays  (65536)

  size_t
    last_remaining,
    remaining,
    share;

  ssize_t
    bin,
    overflow;

  /*
    Tally how many counts exceed the clip limit across all bins.
  */
  remaining=0;
  for (bin=0; bin < (ssize_t) number_bins; bin++)
  {
    overflow=(ssize_t) histogram[bin]-(ssize_t) clip_limit;
    if (overflow > 0)
      remaining+=overflow;
  }
  /*
    Clip each bin and hand every bin an equal share of the excess.
  */
  share=remaining/number_bins;
  overflow=(ssize_t) (clip_limit-share);
  for (bin=0; bin < (ssize_t) number_bins; bin++)
    if ((double) histogram[bin] > clip_limit)
      histogram[bin]=(size_t) clip_limit;
    else
      if ((ssize_t) histogram[bin] > overflow)
        {
          remaining-=histogram[bin]-overflow;
          histogram[bin]=(size_t) clip_limit;
        }
      else
        {
          remaining-=share;
          histogram[bin]+=share;
        }
  /*
    Sweep the histogram repeatedly, dripping the leftover counts into
    bins still below the limit, until nothing remains or a full sweep
    makes no progress.
  */
  do
  {
    size_t
      *cursor,
      *sentinel;

    last_remaining=remaining;
    cursor=histogram;
    sentinel=histogram+number_bins;
    while ((remaining != 0) && (cursor < sentinel))
    {
      share=number_bins/remaining;
      if (share < 1)
        share=1;
      for (cursor=histogram; (cursor < sentinel) && (remaining != 0); cursor+=share)
        if ((double) *cursor < clip_limit)
          {
            (*cursor)++;
            remaining--;
          }
      cursor++;
    }
  } while ((remaining != 0) && (remaining < last_remaining));
}
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const size_t number_bins,
  const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
  const unsigned short
    *row;

  ssize_t
    bin,
    x,
    y;

  /*
    Histogram the tile's pixels by their LUT-mapped gray level; the
    tile rows are tile_info->width wide and clahe_info->width apart
    within the padded image.
  */
  for (bin=0; bin < (ssize_t) number_bins; bin++)
    histogram[bin]=0L;
  row=pixels;
  for (y=0; y < (ssize_t) tile_info->height; y++)
  {
    for (x=0; x < (ssize_t) tile_info->width; x++)
      histogram[lut[row[x]]]++;
    row+=clahe_info->width;
  }
}
/*
  Bilinearly blend the equalization mappings of four neighboring tiles
  (Q12/Q22 above, Q11/Q21 below) over one interpolation rectangle to
  eliminate tile-boundary artifacts.  `pixels' points at the rectangle's
  top-left pixel inside the padded image and is rewritten in place.
*/
static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12,
  const size_t *Q22,const size_t *Q11,const size_t *Q21,
  const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels)
{
  ssize_t
    y;

  unsigned short
    intensity;

  /*
    Bilinear interpolate four tiles to eliminate boundary artifacts.
  */
  for (y=(ssize_t) tile->height; y > 0; y--)
  {
    ssize_t
      x;

    /* x and y count DOWN, so they weight the left/top mappings */
    for (x=(ssize_t) tile->width; x > 0; x--)
    {
      intensity=lut[*pixels];
      /* weighted average of the four tile mappings, normalized by the
         rectangle area (PerceptibleReciprocal avoids division by zero) */
      *pixels++=(unsigned short) (PerceptibleReciprocal((double) tile->width*
        tile->height)*(y*((double) x*Q12[intensity]+(tile->width-x)*
        Q22[intensity])+(tile->height-y)*((double) x*Q11[intensity]+
        (tile->width-x)*Q21[intensity])));
    }
    /* advance to the rectangle's next row within the padded image */
    pixels+=(clahe_info->width-tile->width);
  }
}
static void GenerateCLAHELut(const RangeInfo *range_info,
  const size_t number_bins,unsigned short *lut)
{
  ssize_t
    intensity;

  unsigned short
    bin_width;

  /*
    Scale input image [intensity min,max] to [0,number_bins-1]; the +1
    in the bin width keeps the top intensity inside the last bin.
  */
  bin_width=(unsigned short) ((range_info->max-range_info->min)/number_bins+1);
  for (intensity=(ssize_t) range_info->min; intensity <= (ssize_t) range_info->max; intensity++)
    lut[intensity]=(unsigned short) ((intensity-range_info->min)/bin_width);
}
static void MapCLAHEHistogram(const RangeInfo *range_info,
  const size_t number_bins,const size_t number_pixels,size_t *histogram)
{
  double
    cdf,
    scale;

  ssize_t
    bin;

  /*
    Convert the histogram into a cumulative mapping onto the intensity
    range: each bin becomes min+scale*CDF, clamped to the maximum.
  */
  scale=(double) (range_info->max-range_info->min)/number_pixels;
  cdf=0.0;
  for (bin=0; bin < (ssize_t) number_bins; bin++)
  {
    cdf+=histogram[bin];
    histogram[bin]=(size_t) (range_info->min+scale*cdf);
    if (histogram[bin] > range_info->max)
      histogram[bin]=range_info->max;
  }
}
/*
  Contrast limited adaptive histogram equalization over a padded image
  buffer of clahe_info->width x clahe_info->height 16-bit gray samples.
  clahe_info->x/y give the tile grid dimensions; each tile is
  tile_info->width x tile_info->height.  Returns MagickFalse only on
  allocation failure.
*/
static MagickBooleanType CLAHE(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const RangeInfo *range_info,
  const size_t number_bins,const double clip_limit,unsigned short *pixels)
{
  MemoryInfo
    *tile_cache;

  unsigned short
    *p;

  size_t
    limit,
    *tiles;

  ssize_t
    y;

  unsigned short
    *lut;

  /*
    Contrast limited adaptive histogram equalization; a clip limit of
    exactly 1.0 is a no-op.
  */
  if (clip_limit == 1.0)
    return(MagickTrue);
  /*
    One number_bins-sized histogram per tile of the x-by-y tile grid.
  */
  tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*number_bins,
    clahe_info->y*sizeof(*tiles));
  if (tile_cache == (MemoryInfo *) NULL)
    return(MagickFalse);
  lut=(unsigned short *) AcquireQuantumMemory(NumberCLAHEGrays,sizeof(*lut));
  if (lut == (unsigned short *) NULL)
    {
      tile_cache=RelinquishVirtualMemory(tile_cache);
      return(MagickFalse);
    }
  tiles=(size_t *) GetVirtualMemoryBlob(tile_cache);
  /*
    Translate the relative clip limit into an absolute per-bin count.
  */
  limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins);
  if (limit < 1UL)
    limit=1UL;
  /*
    Generate greylevel mappings for each tile: histogram, clip,
    then map to a cumulative transfer function.
  */
  GenerateCLAHELut(range_info,number_bins,lut);
  p=pixels;
  for (y=0; y < (ssize_t) clahe_info->y; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) clahe_info->x; x++)
    {
      size_t
        *histogram;

      histogram=tiles+(number_bins*(y*clahe_info->x+x));
      GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram);
      ClipCLAHEHistogram((double) limit,number_bins,histogram);
      MapCLAHEHistogram(range_info,number_bins,tile_info->width*
        tile_info->height,histogram);
      p+=tile_info->width;
    }
    /* skip the remaining rows of this tile band */
    p+=clahe_info->width*(tile_info->height-1);
  }
  /*
    Interpolate greylevel mappings to get CLAHE image: walk an
    (x+1)-by-(y+1) lattice of interpolation rectangles; border
    rectangles are half-height/half-width and clamp to the edge tiles.
  */
  p=pixels;
  for (y=0; y <= (ssize_t) clahe_info->y; y++)
  {
    OffsetInfo
      offset;

    RectangleInfo
      tile;

    ssize_t
      x;

    tile.height=tile_info->height;
    tile.y=y-1;
    offset.y=tile.y+1;
    if (y == 0)
      {
        /*
          Top row.
        */
        tile.height=tile_info->height >> 1;
        tile.y=0;
        offset.y=0;
      }
    else
      if (y == (ssize_t) clahe_info->y)
        {
          /*
            Bottom row.
          */
          tile.height=(tile_info->height+1) >> 1;
          tile.y=clahe_info->y-1;
          offset.y=tile.y;
        }
    for (x=0; x <= (ssize_t) clahe_info->x; x++)
    {
      tile.width=tile_info->width;
      tile.x=x-1;
      offset.x=tile.x+1;
      if (x == 0)
        {
          /*
            Left column.
          */
          tile.width=tile_info->width >> 1;
          tile.x=0;
          offset.x=0;
        }
      else
        if (x == (ssize_t) clahe_info->x)
          {
            /*
              Right column.
            */
            tile.width=(tile_info->width+1) >> 1;
            tile.x=clahe_info->x-1;
            offset.x=tile.x;
          }
      /* blend the four tile mappings surrounding this rectangle */
      InterpolateCLAHE(clahe_info,
        tiles+(number_bins*(tile.y*clahe_info->x+tile.x)),  /* Q12 */
        tiles+(number_bins*(tile.y*clahe_info->x+offset.x)),  /* Q22 */
        tiles+(number_bins*(offset.y*clahe_info->x+tile.x)),  /* Q11 */
        tiles+(number_bins*(offset.y*clahe_info->x+offset.x)),  /* Q21 */
        &tile,lut,p);
      p+=tile.width;
    }
    p+=clahe_info->width*(tile.height-1);
  }
  lut=(unsigned short *) RelinquishMagickMemory(lut);
  tile_cache=RelinquishVirtualMemory(tile_cache);
  return(MagickTrue);
}
/*
  Apply CLAHE to the image: convert to Lab, equalize the lightness
  channel (channel 0 of the Lab pixel) in a padded 16-bit buffer,
  write the result back, and restore the original colorspace.
*/
MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width,
  const size_t height,const size_t number_bins,const double clip_limit,
  ExceptionInfo *exception)
{
#define CLAHEImageTag  "CLAHE/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MemoryInfo
    *pixel_cache;

  RangeInfo
    range_info;

  RectangleInfo
    clahe_info,
    tile_info;

  size_t
    n;

  ssize_t
    y;

  unsigned short
    *pixels;

  /*
    Configure CLAHE parameters.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  range_info.min=0;
  range_info.max=NumberCLAHEGrays-1;
  /* tile size defaults to 1/8th of the image in each dimension;
     NOTE(review): a zero tile width/height (tiny image and zero args)
     would divide by zero below -- confirm callers guard against this */
  tile_info.width=width;
  if (tile_info.width == 0)
    tile_info.width=image->columns >> 3;
  tile_info.height=height;
  if (tile_info.height == 0)
    tile_info.height=image->rows >> 3;
  /* tile_info.x/y hold the padding needed to round the image up to
     whole tiles */
  tile_info.x=0;
  if ((image->columns % tile_info.width) != 0)
    tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width);
  tile_info.y=0;
  if ((image->rows % tile_info.height) != 0)
    tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height);
  /* clahe_info: padded pixel dimensions; x/y: tile grid dimensions */
  clahe_info.width=image->columns+tile_info.x;
  clahe_info.height=image->rows+tile_info.y;
  clahe_info.x=(ssize_t) clahe_info.width/tile_info.width;
  clahe_info.y=(ssize_t) clahe_info.height/tile_info.height;
  pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height*
    sizeof(*pixels));
  if (pixel_cache == (MemoryInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache);
  /* remember the colorspace so it can be restored after equalization */
  colorspace=image->colorspace;
  if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
    {
      pixel_cache=RelinquishVirtualMemory(pixel_cache);
      return(MagickFalse);
    }
  /*
    Initialize CLAHE pixels: copy channel 0 of each pixel into the
    padded buffer, centering the image via the half-padding offsets.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  progress=0;
  status=MagickTrue;
  n=0;
  for (y=0; y < (ssize_t) clahe_info.height; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y-
      (tile_info.y >> 1),clahe_info.width,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) clahe_info.width; x++)
    {
      pixels[n++]=ScaleQuantumToShort(p[0]);
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress total of 2*GetPixelChannels(image) is
           unrelated to the row counts iterated here -- confirm intent */
        progress++;
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ?
    (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
  if (status == MagickFalse)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  /*
    Push CLAHE pixels to CLAHE image: skip the top padding rows, and
    within each row skip the left padding columns.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  n=clahe_info.width*(tile_info.y >> 1);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    n+=tile_info.x >> 1;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q[0]=ScaleShortToQuantum(pixels[n++]);
      q+=GetPixelChannels(image);
    }
    /* skip the right padding and the next row's left half-padding gap */
    n+=(clahe_info.width-image->columns-(tile_info.x >> 1));
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        progress++;
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  pixel_cache=RelinquishVirtualMemory(pixel_cache);
  if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will affect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Replace each pixel's channel values with colors looked up along the
  diagonal of the CLUT image: a MaxMap+1 entry table is sampled once up
  front, then each update-enabled channel indexes it by its own value.
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag  "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;

  ssize_t
    i;

  ssize_t adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* a colored CLUT forces a gray image into sRGB so color can be applied */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map))
;
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image: sample MaxMap+1 colors along the CLUT diagonal.
    `adjust' trims one row/column except for integer interpolation.
  */
  status=MagickTrue;
  progress=0;
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    status=InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
    if (status == MagickFalse)
      break;
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      /* remap each update-enabled channel through the sampled table */
      GetPixelInfoPixel(image,q,&pixel);
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /* a CLUT with alpha enables the image's alpha channel */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"
typedef struct _Correction
{
double
slope,
offset,
power;
} Correction;
typedef struct _ColorCorrection
{
Correction
red,
green,
blue;
double
saturation;
} ColorCorrection;
CacheView
*image_view;
char
token[MagickPathExtent];
ColorCorrection
color_correction;
const char
*content,
*p;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*cdl_map;
ssize_t
i;
ssize_t
y;
XMLTreeInfo
*cc,
*ccc,
*sat,
*sop;
/*
Allocate and initialize cdl maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (color_correction_collection == (const char *) NULL)
return(MagickFalse);
ccc=NewXMLTree((const char *) color_correction_collection,exception);
if (ccc == (XMLTreeInfo *) NULL)
return(MagickFalse);
cc=GetXMLTreeChild(ccc,"ColorCorrection");
if (cc == (XMLTreeInfo *) NULL)
{
ccc=DestroyXMLTree(ccc);
return(MagickFalse);
}
color_correction.red.slope=1.0;
color_correction.red.offset=0.0;
color_correction.red.power=1.0;
color_correction.green.slope=1.0;
color_correction.green.offset=0.0;
color_correction.green.power=1.0;
color_correction.blue.slope=1.0;
color_correction.blue.offset=0.0;
color_correction.blue.power=1.0;
color_correction.saturation=0.0;
sop=GetXMLTreeChild(cc,"SOPNode");
if (sop != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*offset,
*power,
*slope;
slope=GetXMLTreeChild(sop,"Slope");
if (slope != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(slope);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.slope=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.slope=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.slope=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
offset=GetXMLTreeChild(sop,"Offset");
if (offset != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(offset);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 1:
{
color_correction.green.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.offset=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
power=GetXMLTreeChild(sop,"Power");
if (power != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(power);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.power=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.power=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.power=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
}
sat=GetXMLTreeChild(cc,"SATNode");
if (sat != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*saturation;
saturation=GetXMLTreeChild(sat,"Saturation");
if (saturation != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(saturation);
p=(const char *) content;
(void) GetNextToken(p,&p,MagickPathExtent,token);
color_correction.saturation=StringToDouble(token,(char **) NULL);
}
}
ccc=DestroyXMLTree(ccc);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Color Correction Collection:");
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.slope: %g",color_correction.red.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.offset: %g",color_correction.red.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.power: %g",color_correction.red.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.slope: %g",color_correction.green.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.offset: %g",color_correction.green.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.power: %g",color_correction.green.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.slope: %g",color_correction.blue.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.offset: %g",color_correction.blue.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.power: %g",color_correction.blue.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.saturation: %g",color_correction.saturation);
}
cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
if (cdl_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
cdl_map[i].red=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.red.slope*i/MaxMap+
color_correction.red.offset,color_correction.red.power))));
cdl_map[i].green=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.green.slope*i/MaxMap+
color_correction.green.offset,color_correction.green.power))));
cdl_map[i].blue=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
color_correction.blue.offset,color_correction.blue.power))));
}
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Apply transfer function to colormap.
*/
double
luma;
luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
0.07217f*image->colormap[i].blue;
image->colormap[i].red=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma;
image->colormap[i].green=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma;
image->colormap[i].blue=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma;
}
/*
Apply transfer function to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
luma;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
0.07217f*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    b,
    h,
    s;

  /*
    Adjust contrast by nudging brightness toward the extremes (sign > 0) or
    toward the middle (sign < 0) with a sinusoidal sigmoid in HSB space;
    hue and saturation are carried through unchanged.
  */
  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  h=0.0;
  s=0.0;
  b=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&h,&s,&b);
  b+=0.5*sign*(0.5*(sin((double) (MagickPI*(b-0.5)))+1.0)-b);
  /* Clamp brightness into the legal [0,1] range. */
  b=b > 1.0 ? 1.0 : (b < 0.0 ? 0.0 : b);
  ConvertHSBToRGB(h,s,b,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"

  CacheView
    *image_view;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Direction of the adjustment: +1 increases contrast, -1 reduces it. */
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;

        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* Once any thread fails, remaining rows are skipped. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
%    A description of each parameter follows:
%
%      o image: the image.
%
%      o black_point: the black point, a pixel count in the range 0 to
%        number-of-pixels; this many of the darkest pixels are forced to
%        black.
%
%      o white_point: the white point, a pixel count in the range 0 to
%        number-of-pixels; this many of the brightest pixels are forced to
%        white.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,
    *histogram,
    *stretch_map,
    *white;

  ImageType
    type;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  type=IdentifyImageType(image,exception);
  if (IsGrayImageType(type) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* Release whichever allocations succeeded before throwing. */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        /*
          With the default channel mask all channels are binned by overall
          intensity; with an explicit mask each channel is binned by its own
          value.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    intensity=0.0;
    /* Walk up from the dark end until black_point pixels are accumulated. */
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    intensity=0.0;
    /* Walk down from the bright end until the white threshold is crossed. */
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping: values at or
    below black map to 0, at or above white map to QuantumRange, and values
    in between scale linearly.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          if (black[i] != white[i])
            stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
              (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* A degenerate range would stretch to a constant; leave it alone. */
        if (black[j] == white[j])
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag "Enhance/Image"
/*
  EnhancePixel() folds one neighborhood pixel (at r) into the weighted
  average: the pixel contributes only when its color distance from the
  window's center pixel is below a fixed threshold; r then advances to the
  next pixel in the row.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /*
      Read a 5-row band with a 2-pixel border so the 5x5 window is always
      fully populated (virtual pixels supply the edges).
    */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Channel offset of the window's center pixel within the 5-row band. */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      const Quantum
        *magick_restrict r;

      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /* Accumulate the 5x5 weighted neighborhood, row by row. */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
      EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
      EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
      EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
      EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
      EnhancePixel(8.0); EnhancePixel(5.0);
      if (total_weight > MagickEpsilon)
        {
          /* Rounded weighted average of the accepted neighbors. */
          pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
          pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
          pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
          pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
          pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
        }
      SetPixelViaPixelInfo(enhance_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag "Equalize/Image"

  CacheView
    *image_view;

  double
    black[CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[CompositePixelChannel+1];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      /* Release whichever allocations succeeded before throwing. */
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        intensity=(double) p[i];
        /*
          When channels are synced, bin every channel by the pixel's overall
          intensity so all channels share one histogram.
        */
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map (per-channel CDF).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /*
    Zero the whole black/white arrays (sizeof(black), not sizeof(*black)):
    the previous code cleared only the first double of each array, leaving
    the remaining channel entries uninitialized.
  */
  (void) memset(black,0,sizeof(black));
  (void) memset(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ssize_t
      j;

    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    /*
      Scale the CDF into the quantum range; a flat channel (black == white)
      is left as the identity mapping.
    */
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        j;

      /*
        Equalize colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o gamma: the image gamma.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaImageTag  "Gamma/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;  /* lookup table: map index -> gamma-corrected quantum */

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);  /* a gamma of 1.0 is the identity transform */
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    A gamma of 0.0 leaves the map zeroed, forcing affected channels to black.
  */
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,PerceptibleReciprocal(gamma))));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining rows */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update */
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  if (image->gamma != 0.0)
    image->gamma*=gamma;  /* track cumulative gamma in the image metadata */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Flatten a colormapped image to DirectClass so pixels can be rewritten
        in place.
      */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      /*
        The OpenCL-accelerated path already converted the pixels; only the
        image metadata remains to be updated.
      */
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /*
            Midpoint of the minimum and maximum components.
          */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /*
            Mean of the squared components.
          */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /*
            Luma is computed on gamma-encoded components.
          */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /*
            Luminance is computed on linear (gamma-decoded) components.
          */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:  /* Rec709Luma doubles as the default intensity method */
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /*
            Root mean square of the components.
          */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  /*
    Luminance methods yield linear gray; luma methods yield encoded gray.
  */
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace,exception));
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag  "Clut/Image"

  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  /*
    Find the Hald order: the smallest level whose cube covers the CLUT edge.
    After squaring, `level' is the number of samples along each color axis
    and `cube_size' is the number of CLUT entries per blue slice.
  */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        area,
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Locate the pixel's position within the CLUT color cube; the
        fractional parts select the blend weights along each axis.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Blend along the green axis within the lower blue slice (stepping the
        offset by `level' advances one green sample) ...
      */
      pixel1=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      pixel2=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel3=zero;
      area=point.y;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.y < 0.5) ? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel3);
      /*
        ... repeat within the upper blue slice (offset advanced by
        `cube_size') ...
      */
      offset+=cube_size;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel4);
      /*
        ... then blend the two slice results along the blue axis.
      */
      pixel=zero;
      area=point.z;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.z < 0.5)? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        area,&pixel);
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    scale;

  /*
    Normalize the pixel into the [black_point,white_point] window, apply the
    inverse gamma, and expand the result to the full quantum range.
    PerceptibleReciprocal() guards against a zero-width window and a zero
    gamma.
  */
  scale=PerceptibleReciprocal(white_point-black_point);
  return(QuantumRange*gamma_pow(scale*((double) pixel-black_point),
    PerceptibleReciprocal(gamma)));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update */
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Clamp any values the level mapping may have pushed out of range.
  */
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
%  LevelizeImage() can be invoked by using the +level command line
%  API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag  "Levelize/Image"
/*
  Compress a quantum into [black_point,white_point] after adjusting its
  normalized value by gamma; the inverse of the LevelImage() mapping.
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update */
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColors() maps the given color to "black" and "white" values,
%  linearly spreading out the colors, and level values on a channel by channel
%  basis, as per LevelImage().  The given colors allow you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true the image values will be modified in
%  the reverse direction.  That is any existing "black" and "white" colors in
%  the image will become the color values given, with all other values
%  compressed appropriately.  This effectively maps a greyscale gradient into
%  the given color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
%    o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
const PixelInfo *black_color,const PixelInfo *white_color,
const MagickBooleanType invert,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickStatusType
status;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
(IsGrayColorspace(white_color->colorspace) == MagickFalse)))
(void) SetImageColorspace(image,sRGBColorspace,exception);
status=MagickTrue;
if (invert == MagickFalse)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
else
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;  /* pixel read failed; proceed with the partial histogram */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point
    levels.  Note black_point and white_point are cumulative pixel counts to
    clip at each end, not intensities.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the remaining levels to the full quantum range.
  */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
%  whiteness, and hue.  And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Map the pixel into HCL space, apply the requested percentage
    adjustments (100 leaves a component unchanged), and map it back.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Adjust the pixel in the HCLp representation: chroma and luma are scaled
    by their percentages while hue is rotated (100 == no change).
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    brightness,
    saturation,
    hue;

  /*
    Rotate hue and percentage-scale saturation/brightness in HSB space;
    100 percent leaves the corresponding component untouched.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  brightness*=0.01*percent_brightness;
  saturation*=0.01*percent_saturation;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    intensity,
    saturation,
    hue;

  /*
    Work in HSI space: intensity and saturation are scaled by their
    percentages and hue is rotated (100 leaves a component as-is).
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  intensity*=0.01*percent_intensity;
  saturation*=0.01*percent_saturation;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    lightness,
    saturation,
    hue;

  /*
    HSL modulation: lightness and saturation are percentage-scaled and hue
    is rotated; 100 percent means no change.
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  lightness*=0.01*percent_lightness;
  saturation*=0.01*percent_saturation;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    value,
    saturation,
    hue;

  /*
    HSV modulation: scale value and saturation by their percentages and
    rotate hue (100 == leave unchanged).
  */
  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  value*=0.01*percent_value;
  saturation*=0.01*percent_saturation;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    whiteness,
    blackness,
    hue;

  /*
    HWB modulation: whiteness and blackness are percentage-scaled and hue
    is rotated; 100 percent leaves a component unchanged.
  */
  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  whiteness*=0.01*percent_whiteness;
  blackness*=0.01*percent_blackness;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,
  const IlluminantType illuminant,double *red,double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    LCH(ab) modulation under the given illuminant: hue is rotated while
    luma and chroma are percentage-scaled (100 == no change).
  */
  ConvertRGBToLCHab(*red,*green,*blue,illuminant,&luma,&chroma,&hue);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertLCHabToRGB(luma,chroma,hue,illuminant,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,
  const IlluminantType illuminant,double *red,double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    LCH(uv) modulation under the given illuminant: hue is rotated while
    luma and chroma are percentage-scaled (100 == no change).
  */
  ConvertRGBToLCHuv(*red,*green,*blue,illuminant,&luma,&chroma,&hue);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertLCHuvToRGB(luma,chroma,hue,illuminant,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
  ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace = UndefinedColorspace;

  const char
    *artifact;

  double
    percent_brightness = 100.0,
    percent_hue = 100.0,
    percent_saturation = 100.0;

  GeometryInfo
    geometry_info;

  IlluminantType
    illuminant = D65Illuminant;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Parse the modulate argument: brightness[,saturation[,hue]] expressed as
    percentages where 100 means "leave unchanged".
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  flags=ParseGeometry(modulate,&geometry_info);
  if ((flags & RhoValue) != 0)
    percent_brightness=geometry_info.rho;
  if ((flags & SigmaValue) != 0)
    percent_saturation=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    percent_hue=geometry_info.xi;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    {
      colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
        MagickFalse,artifact);
      /*
        Bug fix: validate the value just parsed (colorspace); the original
        code mistakenly tested `illuminant', which is parsed further below.
      */
      if ((ssize_t) colorspace < 0)
        colorspace=UndefinedColorspace;
    }
  artifact=GetImageArtifact(image,"color:illuminant");
  if (artifact != (const char *) NULL)
    {
      illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
        MagickFalse,artifact);
      if ((ssize_t) illuminant < 0)
        illuminant=UndefinedIlluminant;
    }
  /*
    Modulate the colormap of PseudoClass images.
  */
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        blue,
        green,
        red;

      red=(double) image->colormap[i].red;
      green=(double) image->colormap[i].green;
      blue=(double) image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
        /*
          Consistency fix: treat plain LCH as LCHuv here, matching the
          DirectClass pixel loop below (the colormap loop previously
          grouped LCH with LCHab while the pixel loop grouped it with
          LCHuv, so PseudoClass and DirectClass images disagreed).
        */
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image,percent_brightness,percent_hue,
      percent_saturation,colorspace,exception) != MagickFalse)
    return(MagickTrue);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        blue,
        green,
        red;

      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        /*
          Bug fix: HSIColorspace was handled for PseudoClass colormaps but
          silently fell through to the HSL default for DirectClass pixels.
        */
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
      }
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag "Negate/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Negate the colormap of PseudoClass images; only channels whose traits
    request updates are touched, and in grayscale mode only entries whose
    red, green, and blue components are equal.
  */
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      if (grayscale != MagickFalse)
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (grayscale != MagickFalse)
    {
      /*
        Grayscale-only mode: serial pass that negates just the pixels whose
        components test gray.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            j;

          if (IsPixelGray(image,q) == MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,j);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            progress++;
            proceed=SetImageProgress(image,NegateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        Bug fix: return the accumulated status instead of unconditionally
        returning MagickTrue, so pixel-cache or progress-monitor failures
        are reported to the caller.
      */
      return(status);
    }
  /*
    Negate every update-enabled channel of every pixel (parallel pass).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  double
    pixels;

  /*
    Delegate to ContrastStretchImage with a black point at 0.15% of the
    pixel count and a white point at 99.95% of the pixel count.
  */
  pixels=(double) image->columns*image->rows;
  return(ContrastStretchImage(image,0.0015*pixels,0.9995*pixels,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  /*
    Right inverse of ScaledSigmoidal(a,b,.): undo the affine scaling, clamp
    the argument into the open domain of the inverse sigmoidal, then apply
    atanh (or the logistic inverse).  Clamping is required because b may be
    0 or 1 and HDRI values may lie out of gamut (see the comment block
    above this function).
  */
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;
  /*
    NOTE: the initializer of `clamped' and the return statement are split
    across the #if/#else branches below; each branch closes the
    parenthesized conditional and returns via the matching inverse.
  */
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  /* tanh-based sigmoidal: inverse is atanh; 2.0/a undoes the 0.5*a slope. */
  return(b+(2.0/a)*atanh(clamped));
#else
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  /* Logistic-based sigmoidal: invert 1/(1+exp(a*(b-x))). */
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Convenience macros above map a quantum through the scaled sigmoidal
    transfer function (sharpen) or its right inverse, clamping the result
    to the quantum range.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done (ScaledSigmoidal divides by s(1)-s(0), which
    vanishes as contrast approaches zero; see the notes above).
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap.
  */
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /* Only channels whose traits request updates are transformed. */
      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image (parallel row-by-row pass).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      /* Transform every channel whose traits request updates. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e B a l a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteBalanceImage() applies white balancing to an image according to a
% grayworld assumption in the LAB colorspace.
%
% The format of the WhiteBalanceImage method is:
%
% MagickBooleanType WhiteBalanceImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteBalanceImage(Image *image,
  ExceptionInfo *exception)
{
#define WhiteBalanceImageTag "WhiteBalance/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    a_mean,
    b_mean;

  MagickOffsetType
    progress;

  MagickStatusType
    status;

  ssize_t
    y;

  /*
    White balance image: under the gray-world assumption the mean chroma
    of the image should be neutral, so estimate the mean a/b offsets in
    Lab space and subtract a luminance-weighted multiple of them from
    every pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=TransformImageColorspace(image,LabColorspace,exception);
  a_mean=0.0;
  b_mean=0.0;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    First (serial) pass: accumulate each pixel's a/b deviation from the
    neutral point (0.5 when quantum-scaled).
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      a_mean+=QuantumScale*GetPixela(image,p)-0.5;
      b_mean+=QuantumScale*GetPixelb(image,p)-0.5;
      p+=GetPixelChannels(image);
    }
  }
  a_mean/=((double) image->columns*image->rows);
  b_mean/=((double) image->columns*image->rows);
  progress=0;
  /*
    Second (parallel) pass: shift each pixel's chroma toward neutral.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        a,
        b;

      /*
        Scale the chroma distance shifted according to amount of luminance:
        brighter pixels receive a larger correction (1.1 is an empirical
        gain factor).
      */
      a=(double) GetPixela(image,q)-1.1*GetPixelL(image,q)*a_mean;
      b=(double) GetPixelb(image,q)-1.1*GetPixelL(image,q)*b_mean;
      SetPixela(image,ClampToQuantum(a),q);
      SetPixelb(image,ClampToQuantum(b),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,WhiteBalanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Optional vibrance step: the "white-balance:vibrance" artifact levels
    the a & b channels by the requested black point (absolute or percent).
  */
  artifact=GetImageArtifact(image,"white-balance:vibrance");
  if (artifact != (const char *) NULL)
    {
      ChannelType
        channel_mask;

      double
        black_point = 0.0;

      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      /*
        Level the a & b channels.
      */
      flags=ParseGeometry(artifact,&geometry_info);
      if ((flags & RhoValue) != 0)
        black_point=geometry_info.rho;
      if ((flags & PercentValue) != 0)
        black_point*=(double) (QuantumRange/100.0);
      channel_mask=SetImageChannelMask(image,(ChannelType) (aChannel |
        bChannel));
      status&=LevelImage(image,black_point,(double) QuantumRange-black_point,
        1.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  status&=TransformImageColorspace(image,sRGBColorspace,exception);
  return(status != 0 ? MagickTrue : MagickFalse);
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT.  Unlike the classic glibc example this version does not modify
 * *y, so callers can safely reuse both inputs afterwards.
 *
 * The result is normalized so that result->tv_usec lies in [0, 1000000)
 * even when the difference is negative (e.g. -0.5s is represented as
 * tv_sec == -1, tv_usec == 500000), matching the original's output for
 * valid timevals.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Work in a single 64-bit microsecond value to avoid carry juggling. */
  long long diff = (long long) (x->tv_sec - y->tv_sec) * 1000000LL
    + (long long) (x->tv_usec - y->tv_usec);
  long long sec = diff / 1000000LL;
  long long usec = diff % 1000000LL;

  /* C division truncates toward zero; renormalize so usec >= 0. */
  if (usec < 0)
  {
    usec += 1000000LL;
    sec -= 1;
  }
  result->tv_sec = sec;
  result->tv_usec = usec;
  /* Return 1 if result is negative. */
  return diff < 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 24;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(2*t1-2,3)),ceild(32*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(16*t1+Ny+29,24)),floord(32*t2+Ny+28,24)),floord(32*t1-32*t2+Nz+Ny+27,24));t3++) {
for (t4=max(max(max(0,ceild(t1-127,128)),ceild(32*t2-Nz-2044,2048)),ceild(24*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(16*t1+Nx+29,2048)),floord(32*t2+Nx+28,2048)),floord(24*t3+Nx+20,2048)),floord(32*t1-32*t2+Nz+Nx+27,2048));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),24*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),24*t3+22),2048*t4+2046),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
matrix.c |
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <float.h>
#include "mmio.h"
#include <tgmath.h> // interferes with mmio.h
#include "cg.h"
#include "matrix.h"
static int compar(const void *pa, const void *pb)
{
struct matrix_coo *a = (struct matrix_coo*)pa;
struct matrix_coo *b = (struct matrix_coo*)pb;
if (a->i < b->i) return -1;
if (a->i > b->i) return 1;
if (a->j < b->j) return -1;
if (a->j > b->j) return 1;
return 0;
}
/* Load a symmetric sparse matrix in Matrix Market format into COO form.
 * fname: path to the .mtx file. On return *n is the matrix dimension,
 * *nz the number of stored entries (the file's single triangle is
 * expanded to both triangles), and *a the (row, col)-sorted COO array,
 * owned by the caller. Exits the process on any open/parse error.
 * Only real, sparse, symmetric, square matrices are accepted. */
void coo_load(const char *fname, int *n, int *nz, struct matrix_coo **a)
{
FILE *f;
if ((f = fopen(fname, "r")) == NULL) {
fprintf(stderr, "Error opening file: %s\n", fname);
exit(1);
}
MM_typecode matcode;
if (mm_read_banner(f, &matcode) != 0) {
fprintf(stderr, "Could not process Matrix Market banner\n");
exit(1);
}
if (!mm_is_matrix(matcode) || !mm_is_sparse(matcode) || mm_is_complex(matcode) || !mm_is_symmetric(matcode)) {
/* fixed typo in the message: was "Market Market" */
fprintf(stderr, "This application does not support the Matrix Market type: %s\n",
mm_typecode_to_str(matcode));
exit(1);
}
int M, N, NZ;
if (mm_read_mtx_crd_size(f, &M, &N, &NZ) != 0) {
fprintf(stderr, "Could not parse matrix size\n");
exit(1);
}
if (M != N) {
fprintf(stderr, "Matrix is not square\n");
exit(1);
}
/* worst case every entry is off-diagonal and gets mirrored: 2*NZ slots */
struct matrix_coo *coo = ALLOC(struct matrix_coo, 2 * NZ);
int k = 0;
for (int l = 0; l < NZ; l++) {
double real;
if (mm_read_mtx_crd_entry(f, &coo[k].i, &coo[k].j, &real, NULL, matcode) != 0) {
fprintf(stderr, "Error reading matrix element %i\n", l);
exit(1);
}
/* Matrix Market indices are 1-based; convert to 0-based */
coo[k].i--;
coo[k].j--;
coo[k].a = real;
if (coo[k].i == coo[k].j) k++;
else {
/* off-diagonal entry: also store the mirrored (j, i) entry */
coo[k + 1].i = coo[k].j;
coo[k + 1].j = coo[k].i;
coo[k + 1].a = coo[k].a;
k += 2;
}
}
fclose(f);
/* sort by (row, col) so csr_create() can scan rows linearly */
qsort(coo, k, sizeof(struct matrix_coo), compar);
*n = N; *nz = k; *a = coo;
/* int ii;
for(ii = 0; ii < k; ii++)
printf("i=%d, j=%d, a=%e\n", coo[ii].i, coo[ii].j, coo[ii].a); */
}
/* Infinity norm of a COO matrix: max over rows of the sum of absolute
 * values of that row's entries.
 * n: matrix dimension, nz: entry count, coo: entry array.
 * Bug fix: the value was previously truncated to int and run through
 * the integer abs(), corrupting the norm for non-integral entries. */
double coo_norm_inf(int n, int nz, struct matrix_coo *coo)
{
double *amax = CALLOC(double, n);
for (int i = 0; i < nz; i++) {
int row = coo[i].i;
double val = coo[i].a; /* keep full double precision */
amax[row] += fabs(val); /* fabs, not integer abs */
}
double norm = 0.0;
for (int i = 0; i < n; i++) {
if (amax[i] > norm) norm = amax[i];
}
FREE(amax);
return norm;
}
double coo_max_nz(int n, int nz, struct matrix_coo *coo)
{
double *m = CALLOC(double, n);
for (int i = 0; i < nz; i++) {
int row = coo[i].i;
m[row]++;
}
int r = 0.0;
for (int i = 0; i < n; i++) {
if (m[i] > r) r = m[i];
}
FREE(m);
return r;
}
// CSR matrix
struct matrix_csr { struct matrix super; int *i; int *j; DOUBLE *A; };
/* y = A*x for a CSR matrix, double precision; each row is independent
 * so rows are distributed across OpenMP threads. */
void csr_dmult(struct matrix_csr *mat, DOUBLE *x, DOUBLE *y)
{
#pragma omp parallel for \
shared(mat, x, y)
for (int row = 0; row < mat->super.n; row++) {
DOUBLE acc = 0.0;
const int begin = mat->i[row];
const int end = mat->i[row + 1];
for (int idx = begin; idx < end; idx++)
acc += mat->A[idx] * x[mat->j[idx]];
y[row] = acc;
}
}
/* y = A*x for a CSR matrix, single-precision in/out; row sums are
 * accumulated in FLOAT2 before the final narrowing store. */
void csr_smult(struct matrix_csr *mat, FLOAT *x, FLOAT *y)
{
#pragma omp parallel for \
shared(mat, x, y)
for (int row = 0; row < mat->super.n; row++) {
const int begin = mat->i[row];
const int end = mat->i[row + 1];
FLOAT2 acc = 0.0;
for (int idx = begin; idx < end; idx++)
acc += mat->A[idx] * x[mat->j[idx]];
y[row] = acc;
}
}
/* Build a CSR matrix from a (row, col)-sorted COO array.
 * n: dimension, nz: entry count, coo: sorted entries (see coo_load).
 * Returns a heap-allocated matrix whose mult hooks dispatch to the
 * CSR kernels above. */
struct matrix *csr_create(int n, int nz, struct matrix_coo *coo)
{
int *rowptr = ALLOC(int, n + 1);
int *colidx = ALLOC(int, nz);
DOUBLE *vals = ALLOC(DOUBLE, nz);
rowptr[0] = 0;
int pos = 0;
/* coo is sorted by row, so each row's entries are one contiguous run */
for (int row = 0; row < n; row++) {
while (pos < nz && coo[pos].i == row) {
colidx[pos] = coo[pos].j;
vals[pos] = coo[pos].a;
pos++;
}
rowptr[row + 1] = pos;
}
struct matrix_csr *mat = ALLOC(struct matrix_csr, 1);
mat->super.n = n;
mat->i = rowptr;
mat->j = colidx;
mat->A = vals;
mat->super.dmult = (void (*)(struct matrix *, DOUBLE *, DOUBLE *))csr_dmult;
mat->super.smult = (void (*)(struct matrix *, FLOAT *, FLOAT *))csr_smult;
return (struct matrix *)mat;
}
// dense matrix
struct matrix_dense { struct matrix super; DOUBLE *A; };
/* y = A*x for a dense row-major matrix, double precision. */
void dense_dmult(struct matrix_dense *mat, DOUBLE *x, DOUBLE *y)
{
#pragma omp parallel for \
shared(mat, x, y)
for (int row = 0; row < mat->super.n; row++) {
const DOUBLE *arow = mat->A + row * mat->super.n;
DOUBLE acc = 0.0;
for (int col = 0; col < mat->super.n; col++)
acc += arow[col] * x[col];
y[row] = acc;
}
}
/* y = A*x for a dense row-major matrix, single precision in/out;
 * partial sums are accumulated in FLOAT2 for accuracy.
 * BUG FIX: the signature previously carried a spurious leading
 * "uint8_t m" parameter that did not match the
 * void (*)(struct matrix *, FLOAT *, FLOAT *) pointer type it is cast
 * to in dense_create() -- calling through that mismatched pointer is
 * undefined behavior. The parameter is removed so the signature agrees
 * with the smult hook, matching csr_smult and jacobi_smult. */
void dense_smult(struct matrix_dense *mat, FLOAT *x, FLOAT *y)
{
#pragma omp parallel for \
shared(mat, x, y)
for (int i = 0; i < mat->super.n; i++) {
FLOAT2 t = 0.0;
for (int j = 0; j < mat->super.n; j++)
t += mat->A[i * mat->super.n + j] * x[j];
y[i] = t;
}
}
/* Build a dense row-major matrix from COO entries; positions absent
 * from coo remain zero because CALLOC zero-fills the buffer. */
struct matrix *dense_create(int n, int nz, struct matrix_coo *coo)
{
DOUBLE *A = CALLOC(DOUBLE, n * n);
// scatter the sparse entries into the row-major buffer
for (int k = 0; k < nz; k++)
A[coo[k].i * n + coo[k].j] = coo[k].a;
struct matrix_dense *mat = ALLOC(struct matrix_dense, 1);
mat->super.n = n;
mat->A = A;
mat->super.dmult = (void (*)(struct matrix *, DOUBLE *, DOUBLE *))dense_dmult;
mat->super.smult = (void (*)(struct matrix *, FLOAT *, FLOAT *))dense_smult;
return (struct matrix *)mat;
}
// Jacobi preconditioner
struct precond_jacobi { struct matrix super; FLOAT *d; };
/* Apply the Jacobi (diagonal) preconditioner: y = D^{-1} x. */
void jacobi_smult(struct precond_jacobi *pre, FLOAT *x, FLOAT *y)
{
const int n = pre->super.n;
const FLOAT *diag = pre->d;
for (int idx = 0; idx < n; idx++)
y[idx] = x[idx] / diag[idx];
}
/* Build a Jacobi preconditioner holding the matrix diagonal.
 * Only the smult hook is populated; dmult is deliberately NULL. */
struct matrix *jacobi_create(int n, int nz, struct matrix_coo *coo)
{
FLOAT *diag = ALLOC(FLOAT, n);
for (int row = 0; row < n; row++)
diag[row] = 0.0;
// pick the diagonal entries out of the COO stream
for (int k = 0; k < nz; k++) {
if (coo[k].i == coo[k].j)
diag[coo[k].i] = coo[k].a;
}
struct precond_jacobi *pre = ALLOC(struct precond_jacobi, 1);
pre->super.n = n;
pre->super.dmult = NULL;
pre->super.smult = (void (*)(struct matrix *, FLOAT *, FLOAT *))jacobi_smult;
pre->d = diag;
return (struct matrix *)pre;
}
GB_binop__bor_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint16)
// A*D function (colscale): GB (_AxD__bor_uint16)
// D*A function (rowscale): GB (_DxB__bor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint16)
// C=scalar+B GB (_bind1st__bor_uint16)
// C=scalar+B' GB (_bind1st_tran__bor_uint16)
// C=A+scalar GB (_bind2nd__bor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bor_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT16 || GxB_NO_BOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: fold the scalar b into every entry of the dense matrix C with
// the BOR (bitwise-or) uint16_t operator. The loop itself comes from the
// included template; this wrapper only unpacks the scalar.
GrB_Info GB (_Cdense_accumb__bor_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* controls; generic code handles it
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns; kept
// verbatim because this file is generator output (do not hand-edit).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bor_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p]: apply the BOR operator with the scalar bound as
// the first argument. Bb is B->b when B is bitmap (NULL otherwise),
// bnz the number of entries, nthreads the OpenMP thread count.
GrB_Info GB (_bind1st__bor_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries absent from the bitmap (GBB is always true if Bb is NULL)
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) | (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y: apply the BOR operator with the scalar bound as
// the second argument. Ab is A->b when A is bitmap (NULL otherwise),
// anz the number of entries, nthreads the OpenMP thread count.
GrB_Info GB (_bind2nd__bor_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap (GBB is always true if Ab is NULL)
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) | (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
GrB_Info GB (_bind1st_tran__bor_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
GrB_Info GB (_bind2nd_tran__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__floor_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__floor_fc64_fc64
// op(A') function: GB_unop_tran__floor_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_cfloor (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cfloor (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_cfloor (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FLOOR || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cfloor (Ax [p]) for all anz entries; Cx and Ax may alias.
// Two paths: full/sparse (Ab == NULL, every slot valid) and bitmap
// (only slots with Ab [p] != 0 are present).
GrB_Info GB_unop_apply__floor_fc64_fc64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op: a parallel memcpy suffices (disabled for floor)
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_cfloor (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_cfloor (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cfloor (A'): transpose and apply; the whole loop nest lives in
// the included template, driven by the GB_CAST_OP macro defined above.
GrB_Info GB_unop_tran__floor_fc64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
NA.c | // Elements taken from the coop package (src/R_naomit.c)
// Copyright (c) 2016-2017 Drew Schmidt
#include <stdint.h>
#include <string.h>
#include "endianness.h"
#include "NA.h"
#include "Rfloat.h"
#include "unroll.h"
// R uses 0x7ff00000000007a2 for NA_REAL, and 0x7f8007a2 is a reasonable float analogue
float NA_FLOAT; // extern'd in NA.h
float R_NaNf;
// Construct the float NA sentinel: a quiet NaN carrying payload 0x7a2
// (1954), the float analogue of R's NA_REAL encoding. The constant's
// byte order is flipped on big-endian builds (SPM_BOBE).
static inline float set_na_float()
{
#if SPM_BOBE
const int32_t bits = 0xa207807f;
#else
const int32_t bits = 0x7f8007a2;
#endif
float out;
memcpy((void*) &out, (void*) &bits, sizeof(out));
return out;
}
/*
static inline float set_nan_float()
{
float ret;
#if SPM_BOBE
uint32_t NaN = 0x0100807f;
#else
uint32_t NaN = 0x7f800001;
#endif
memcpy((void*) &ret, (void*) &NaN, sizeof(ret));
return ret;
}
*/
// Distinguish the NA sentinel from ordinary NaNs: x is NA iff it is a
// NaN whose low mantissa half-word equals 1954 (0x7a2) -- see
// set_na_float(). Which half-word holds the payload depends on
// endianness; mrb is presumably a union of float and two 16-bit
// halves (declared in a header not visible here -- TODO confirm).
int ISNAf(const float x)
{
if (!isnan(x))
return 0;
mrb y;
y.x = x;
#if SPM_BOBE
return y.y[1] == 1954;
#else
return y.y[0] == 1954;
#endif
}
// True for any NaN that is not the NA sentinel.
int ISNANf(const float x)
{
if (!isnan(x))
return 0;
return ISNAf(x) ? 0 : 1;
}
// have to call on package load to set the global NA_FLOAT
// Initialize the global NA_FLOAT sentinel; must be called at package
// load. Returns a length-1 float vector holding the sentinel so the
// R side can cache it.
SEXP R_init_NAf()
{
SEXP ret;
PROTECT(ret = newvec(1));
NA_FLOAT = set_na_float();
FLOAT(ret)[0] = NA_FLOAT;
UNPROTECT(1);
return ret;
}
// Initialize the global R_NaNf; must be called at package load.
// Returns a length-1 float vector holding the value.
SEXP R_init_NaNf()
{
SEXP ret;
PROTECT(ret = newvec(1));
// BUG FIX: this previously reused set_na_float(), giving R_NaNf the
// NA payload (1954), so ISNAf(R_NaNf) was true and ISNANf(R_NaNf)
// false. Use the ordinary quiet-NaN pattern (payload 1) instead,
// matching the commented-out set_nan_float() above.
#if SPM_BOBE
const uint32_t nan_bits = 0x0100807f;
#else
const uint32_t nan_bits = 0x7f800001;
#endif
memcpy((void*) &R_NaNf, (void*) &nan_bits, sizeof(R_NaNf));
FLOAT(ret)[0] = R_NaNf;
UNPROTECT(1);
return ret;
}
// Elementwise NA/NaN test over a float matrix or vector; returns an R
// logical object of the same shape.
SEXP R_isna_spm(SEXP x)
{
SEXP ret;
const float_len_t m = NROWS(x);
const float_len_t n = NCOLS(x);
const float *vals = FLOAT(x);
if (ISAVEC(x))
PROTECT(ret = allocVector(LGLSXP, ((size_t)m*n)));
else
PROTECT(ret = allocMatrix(LGLSXP, m, n));
int *flags = LOGICAL(ret);
for (float_len_t j=0; j<n; j++)
{
for (float_len_t i=0; i<m; i++)
{
const float v = vals[i + m*j];
// ISNAf() already implies isnan(); the isnan() test also keeps plain NaNs
flags[i + m*j] = isnan(v) || ISNAf(v);
}
}
UNPROTECT(1);
return ret;
}
// ----------------------------------------------------------------------------
// anyNA
// ----------------------------------------------------------------------------
// Scalar anyNA() over the full data block of x; returns a length-1
// logical.
SEXP R_anyNA_spm(SEXP x)
{
const R_xlen_t len = (R_xlen_t) NROWS(x)*NCOLS(x);
SEXP ret;
PROTECT(ret = allocVector(LGLSXP, 1));
LOGICAL(ret)[0] = anyNA(len, DATA(x));
UNPROTECT(1);
return ret;
}
// ----------------------------------------------------------------------------
// na.omit
// ----------------------------------------------------------------------------
#include <stdlib.h>
#include "safeomp.h"
// faster to index each element and operate accordingly, but
// this is too memory expensive for most applications
// note: R does this anyway because, well, R...
// na.omit for a small m-by-n column-major float matrix: drop every row
// that contains an NA or NaN. Uses one flag per *element* (len = m*n),
// which is memory-hungry but simple -- hence "small". Returns the
// reduced matrix, or R_NilValue when no row was removed (caller then
// returns x unchanged).
static SEXP R_naomit_spm_small(const float_len_t m, const float_len_t n, const float *const x)
{
SEXP ret;
const size_t len = m*n;
float_len_t m_fin = m;
int *na_vec_ind = (int*) calloc(len, sizeof(*na_vec_ind));
CHECKMALLOC(na_vec_ind);
// get indices of NA's
for (size_t i=0; i<len; i++)
{
if (ISNAf(x[i]) || isnan(x[i]))
na_vec_ind[i] = 1;
}
// adjust col index; turn first column of the NA indices
// to track which rows should go
for (float_len_t j=1; j<n; j++)
{
const float_len_t mj = m*j;
for (float_len_t i=0; i<m; i++)
{
if (na_vec_ind[i + mj])
na_vec_ind[i] = 1;
}
}
// get number of rows of output
for (float_len_t i=0; i<m; i++)
m_fin -= na_vec_ind[i];
if (m_fin == m)
{
free(na_vec_ind);
return R_NilValue;
}
// build reduced matrix
PROTECT(ret = newmat(m_fin, n));
float *ptr = DATA(ret);
for (float_len_t j=0; j<n; j++)
{
const float_len_t mj = m*j;
float_len_t row = 0;
for (float_len_t i=0; i<m; i++)
{
// NOTE(review): i ranges over [0, m), so i%m == i; the modulo is redundant
if (!na_vec_ind[i%m])
{
ptr[row + m_fin*j] = x[i + mj];
row++;
}
}
}
free(na_vec_ind);
UNPROTECT(1);
return ret;
}
// na.omit for a large m-by-n column-major float matrix: drop every row
// containing an NA or NaN. Uses one flag per *row* (not per element,
// unlike the _small variant) and parallelizes over columns.
// Returns the reduced matrix, or R_NilValue when no row was removed.
static SEXP R_naomit_spm_big(const float_len_t m, const float_len_t n, const float *const x)
{
SEXP ret;
float_len_t m_fin = m;
int *rows = (int*) calloc(m, sizeof(*rows));
CHECKMALLOC(rows);
// get indices of NA's
// NOTE(review): multiple threads may write rows[i] = 1 concurrently;
// all writers store the same value, but this is still formally a data
// race -- consider atomic writes or a per-thread pass. Also note the
// mixed x[i + m*j] / x[i + mj] spelling below (same element).
#pragma omp parallel for default(shared) shared(rows)
for (float_len_t j=0; j<n; j++)
{
const float_len_t mj = m*j;
for (float_len_t i=0; i<m; i++)
{
if (ISNAf(x[i + m*j]) || isnan(x[i + mj]))
rows[i] = 1;
}
}
// get number of rows of output
for (float_len_t i=0; i<m; i++)
m_fin -= rows[i];
if (m_fin == m)
{
free(rows);
return R_NilValue;
}
PROTECT(ret = newmat(m_fin, n));
float *ptr = DATA(ret);
// build reduced matrix; each thread owns whole columns, so writes to
// ptr never overlap
#pragma omp parallel for default(shared) shared(rows, ptr, m_fin)
for (float_len_t j=0; j<n; j++)
{
const float_len_t mj = m*j;
float_len_t row = 0;
for (float_len_t i=0; i<m; i++)
{
if (!rows[i])
{
ptr[row + m_fin*j] = x[i + mj];
row++;
}
}
}
free(rows);
UNPROTECT(1);
return ret;
}
// na.omit for a float vector: returns a new vector with every NA/NaN
// entry removed, preserving the order of the survivors.
static SEXP R_naomit_spm_vec(size_t n, const float *const x)
{
SEXP ret;
// first pass: count the entries that survive
size_t keep = 0;
for (size_t i=0; i<n; i++)
{
if (!ISNAf(x[i]) && !isnan(x[i]))
keep++;
}
PROTECT(ret = newvec(keep));
float *dst = FLOAT(ret);
// second pass: copy survivors in order
size_t pos = 0;
for (size_t i=0; i<n; i++)
{
if (!ISNAf(x[i]) && !isnan(x[i]))
dst[pos++] = x[i];
}
UNPROTECT(1);
return ret;
}
// Dispatch na.omit over a float vector or matrix. Vectors take the
// vector path; matrices use the dense element-flag path for small
// inputs and the OpenMP row-flag path otherwise. When nothing was
// removed the helpers return R_NilValue and x itself is returned.
SEXP R_naomit_spm(SEXP x)
{
const float_len_t m = NROWS(x);
const float_len_t n = NCOLS(x);
if (ISAVEC(x))
return R_naomit_spm_vec(m, DATA(x));
SEXP reduced;
if (m*n < OMP_MIN_SIZE)
reduced = R_naomit_spm_small(m, n, DATA(x));
else
reduced = R_naomit_spm_big(m, n, DATA(x));
return (reduced == R_NilValue) ? x : reduced;
}
|
l1_normMEX.c | #include "mex.h"
#include <math.h>
#include "emmintrin.h"
#include "xmmintrin.h"
#ifdef __GNU__
#include <omp.h>
#endif
#ifndef MAXCORES
#define MAXCORES 1
#endif
/* Smoothed L1 norm of X1 + T*X2 (MEX entry point).
 *
 * right[0] = X1 and right[1] = X2: arrays of matching class (single or
 * double) and complexity; right[2] = scalar T (step); right[3] = scalar
 * S (smoothing). left[0] = sum_i sqrt(|X1_i + T*X2_i|^2 + S), returned
 * in the precision of X1.
 *
 * Bug fixes relative to the previous revision:
 * - the OpenMP reduction accumulators L1 / L1f were read without ever
 * being initialized in the real-input branches (undefined behavior);
 * both are now zeroed before use.
 * - the single-precision real branch accumulated into (and stored) the
 * unrelated, uninitialized double L1 instead of L1f.
 * - SIMD accumulators are written out with _mm_storeu_* since local
 * arrays are not guaranteed to be 16-byte aligned; mxArray data is
 * assumed 16-byte aligned for the _mm_load_* calls -- TODO confirm.
 * - removed the unused locals X1, X2, T.
 */
void mexFunction(int nlhs, mxArray *left[], int nrhs, const mxArray *right[]) {
/* Declare variables */
mwSize elem, cmplx, cmplx1, cmplx2, cmplx3;
long long i, elem2;
const mwSize size[]={1,1};
mxClassID precision, precision1;
mxArray *Y;
double *pX1r, *pX1i, *pX2r, *pX2i, *pYr, *pT, *pSd, Td, Sd;
double xr, xi, L1, dL1[2];
__m128d vTd, vSd, vL1, vxr, vxi;
float *pX1rf, *pX1if, *pX2rf, *pX2if, *pYrf, *pTf, *pSf, Tf, Sf;
float xrf, xif, L1f, dL1f[4];
__m128 vTf, vSf, vL1f, vxrf, vxif;
/* Get number of elements */
elem = mxGetNumberOfElements(right[0]);
/* Test for complex and obtain data class */
cmplx = mxIsComplex(right[0]);
cmplx1 = mxIsComplex(right[1]);
cmplx2 = mxIsComplex(right[2]);
cmplx3 = mxIsComplex(right[3]);
if (cmplx != cmplx1)
mexErrMsgTxt("Inputs 0 and 1 have different complexity");
if (cmplx2)
mexErrMsgTxt("Input 2 is complex (must be real)");
if (cmplx3)
mexErrMsgTxt("Input 3 is complex (must be real)");
/* Obtain and test data class */
precision = mxGetClassID(right[0]);
precision1 = mxGetClassID(right[1]);
if (precision != precision1)
mexErrMsgTxt("Inputs 0 and 1 have different precision");
/* Get pointers to input arrays and create output array */
Y = mxCreateNumericArray(2, size, precision, mxREAL);
if (precision == mxDOUBLE_CLASS) {
pX1r = mxGetPr(right[0]);
pX2r = mxGetPr(right[1]);
if (cmplx) {
pX1i = mxGetPi(right[0]);
pX2i = mxGetPi(right[1]);
}
pYr = mxGetPr(Y);
}
else {
pX1rf = mxGetData(right[0]);
pX2rf = mxGetData(right[1]);
if (cmplx) {
pX1if = mxGetImagData(right[0]);
pX2if = mxGetImagData(right[1]);
}
pYrf = mxGetData(Y);
}
/* Get pointer to input scalar */
if (mxGetClassID(right[2]) == mxDOUBLE_CLASS)
pT = mxGetData(right[2]);
else
pTf = mxGetData(right[2]);
/* Get pointer to smoothing factor */
if (mxGetClassID(right[3]) == mxDOUBLE_CLASS)
pSd = mxGetData(right[3]);
else
pSf = mxGetData(right[3]);
/* Convert scalars to same data type as input arrays */
if (precision == mxDOUBLE_CLASS) {
if (mxGetClassID(right[2]) == mxDOUBLE_CLASS)
Td = (double)pT[0];
else
Td = (double)pTf[0];
if (mxGetClassID(right[3]) == mxDOUBLE_CLASS)
Sd = (double)pSd[0];
else
Sd = (double)pSf[0];
}
else {
if (mxGetClassID(right[2]) == mxDOUBLE_CLASS)
Tf = (float)pT[0];
else
Tf = (float)pTf[0];
if (mxGetClassID(right[3]) == mxDOUBLE_CLASS)
Sf = (float)pSd[0];
else
Sf = (float)pSf[0];
}
#ifdef __GNU__
/* Set number of threads */
omp_set_num_threads(MAXCORES);
#endif
/* Loop through and compute the abs of the combined coefficients then sum */
if (precision == mxDOUBLE_CLASS) {
L1 = 0.0; /* FIX: reduction accumulator must start at zero */
if (cmplx) {
/* Compute the number of elements for SIMD loop */
elem2 = (elem/2)*2;
/* SIMD variables */
vTd = _mm_load1_pd(&Td);
vSd = _mm_load1_pd(&Sd);
vL1 = _mm_setzero_pd();
/* NOTE(review): reduction over __m128d relies on GCC's vector-type
 * reduction extension; it is not standard OpenMP */
#pragma omp parallel for private(i,vxr,vxi) reduction(+: vL1)
for (i=0; i<elem2; i+=2) {
vxr = _mm_add_pd(_mm_load_pd(pX1r+i),_mm_mul_pd(vTd,_mm_load_pd(pX2r+i)));
vxr = _mm_mul_pd(vxr,vxr);
vxi = _mm_add_pd(_mm_load_pd(pX1i+i),_mm_mul_pd(vTd,_mm_load_pd(pX2i+i)));
vxi = _mm_mul_pd(vxi,vxi);
vL1 = _mm_add_pd(vL1,_mm_sqrt_pd(_mm_add_pd(_mm_add_pd(vxr,vxi),vSd)));
}
/* Save results (FIX: unaligned store; dL1 need not be 16-byte aligned) */
_mm_storeu_pd(dL1,vL1);
L1 = dL1[0] + dL1[1];
/* Finish the last few elements */
for (i=elem2; i<elem; i++) {
xr = pX1r[i] + Td*pX2r[i];
xi = pX1i[i] + Td*pX2i[i];
L1 += sqrt(xr*xr + xi*xi + Sd);
}
}
else {
#pragma omp parallel for private(i,xr) reduction(+: L1)
for (i=0; i<elem; i++) {
xr = pX1r[i] + Td*pX2r[i];
L1 += sqrt(xr*xr + Sd);
}
}
pYr[0] = L1;
}
else {
L1f = 0.0f; /* FIX: reduction accumulator must start at zero */
if (cmplx) {
/* Compute the number of elements for SIMD loop */
elem2 = (elem/4)*4;
/* SIMD variables */
vTf = _mm_load1_ps(&Tf);
vSf = _mm_load1_ps(&Sf);
vL1f = _mm_setzero_ps();
#pragma omp parallel for private(i,vxrf,vxif) reduction(+: vL1f)
for (i=0; i<elem2; i+=4) {
vxrf = _mm_add_ps(_mm_load_ps(pX1rf+i),_mm_mul_ps(vTf,_mm_load_ps(pX2rf+i)));
vxrf = _mm_mul_ps(vxrf,vxrf);
vxif = _mm_add_ps(_mm_load_ps(pX1if+i),_mm_mul_ps(vTf,_mm_load_ps(pX2if+i)));
vxif = _mm_mul_ps(vxif,vxif);
vL1f = _mm_add_ps(vL1f,_mm_sqrt_ps(_mm_add_ps(_mm_add_ps(vxrf,vxif),vSf)));
}
/* Save results (FIX: unaligned store; dL1f need not be 16-byte aligned) */
_mm_storeu_ps(dL1f,vL1f);
L1f = dL1f[0] + dL1f[1] + dL1f[2] + dL1f[3];
/* Finish the last few elements */
for (i=elem2; i<elem; i++) {
xrf = pX1rf[i] + Tf*pX2rf[i];
xif = pX1if[i] + Tf*pX2if[i];
L1f += sqrt(xrf*xrf + xif*xif + Sf);
}
}
else {
/* FIX: this branch previously accumulated into the double L1 */
#pragma omp parallel for private(i,xrf) reduction(+: L1f)
for (i=0; i<elem; i++) {
xrf = pX1rf[i] + Tf*pX2rf[i];
L1f += sqrt(xrf*xrf + Sf);
}
}
pYrf[0] = L1f;
}
/* Return values */
left[0] = Y;
}
|
close_enter_exit.c | // RUN: %libomptarget-compile-run-and-check-generic
// REQUIRES: unified_shared_memory
// UNSUPPORTED: clang-6, clang-7, clang-8, clang-9
// Fails on amdgcn with error: GPU Memory Error
// XFAIL: amdgcn-amd-amdhsa
#include <omp.h>
#include <stdio.h>
#pragma omp requires unified_shared_memory
#define N 1024
// Entry point for the libomptarget unified_shared_memory "close" modifier
// test: allocates an N-element host array, then exercises four mapping
// patterns (plain map, map with close, use_device_ptr with and without
// close, and enter/exit data with close) and prints one FileCheck'd line
// per pattern.  Fix: removed the unused local `int fails;` (dead
// declaration that only produced an unused-variable warning).
int main(int argc, char *argv[]) {
  void *host_alloc = 0, *device_alloc = 0;
  int *a = (int *)malloc(N * sizeof(int));

  // Init
  for (int i = 0; i < N; ++i) {
    a[i] = 10;
  }
  host_alloc = &a[0];

  //
  // map + target no close
  //
#pragma omp target data map(tofrom : a[ : N]) map(tofrom : device_alloc)
  {
#pragma omp target map(tofrom : device_alloc)
    { device_alloc = &a[0]; }
  }

  // CHECK: a used from unified memory.
  if (device_alloc == host_alloc)
    printf("a used from unified memory.\n");

  //
  // map + target with close
  //
  device_alloc = 0;
#pragma omp target data map(close, tofrom : a[ : N]) map(tofrom : device_alloc)
  {
#pragma omp target map(tofrom : device_alloc)
    { device_alloc = &a[0]; }
  }
  // CHECK: a copied to device.
  if (device_alloc != host_alloc)
    printf("a copied to device.\n");

  //
  // map + use_device_ptr no close
  //
  device_alloc = 0;
#pragma omp target data map(tofrom : a[ : N]) use_device_ptr(a)
  { device_alloc = &a[0]; }
  // CHECK: a used from unified memory with use_device_ptr.
  if (device_alloc == host_alloc)
    printf("a used from unified memory with use_device_ptr.\n");

  //
  // map + use_device_ptr close
  //
  device_alloc = 0;
#pragma omp target data map(close, tofrom : a[ : N]) use_device_ptr(a)
  { device_alloc = &a[0]; }
  // CHECK: a used from device memory with use_device_ptr.
  if (device_alloc != host_alloc)
    printf("a used from device memory with use_device_ptr.\n");

  //
  // map enter/exit + close
  //
  device_alloc = 0;
#pragma omp target enter data map(close, to : a[ : N])
#pragma omp target map(from : device_alloc)
  { device_alloc = &a[0]; }
#pragma omp target exit data map(from : a[ : N])
  // CHECK: a has been mapped to the device.
  if (device_alloc != host_alloc)
    printf("a has been mapped to the device.\n");

  free(a);

  // CHECK: Done!
  printf("Done!\n");
  return 0;
}
|
convolution_pack8to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Direct (im2col-free) int8 convolution for elempack=8 input producing
// elempack=4 output, using NEON intrinsics.  For each output pixel it
// accumulates, over every input channel block and kernel tap, the dot
// product of one 8-wide int8 input element against four 8-wide weight
// vectors (one per output lane), and stores a 4-lane int32 result.
// top_blob must be pre-allocated; weights are consumed 32 bytes per tap
// (4 output lanes x 8 input lanes) — layout assumed from the access
// pattern below, confirm against the weight-packing code.
static void convolution_pack8to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int maxk = kernel_w * kernel_h;
// kernel offsets
// Precompute, for each of the maxk taps, its flattened element offset
// from the window origin; `gap` jumps from the end of one kernel row to
// the start of the next, accounting for dilation.
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
// num_output
// Parallelize over output channels: each p writes a disjoint channel.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// Two accumulators holding the 4 output lanes (2 lanes each).
int32x4_t _sum01 = vdupq_n_s32(0);
int32x4_t _sum23 = vdupq_n_s32(0);
const signed char* kptr = weight_data_int8.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
// 8 int8 values per spatial element (elempack=8).
const signed char* sptr = m.row<signed char>(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
int8x8_t _val = vld1_s8(sptr + space_ofs[k] * 8);
// Four 8-wide weight vectors: one per output lane.
int8x8_t _w0 = vld1_s8(kptr);
int8x8_t _w1 = vld1_s8(kptr + 8);
int8x8_t _w2 = vld1_s8(kptr + 16);
int8x8_t _w3 = vld1_s8(kptr + 24);
// Widening multiply: 8 x (int8*int8) -> 8 x int16 per lane.
int16x8_t _wv0 = vmull_s8(_val, _w0);
int16x8_t _wv1 = vmull_s8(_val, _w1);
int16x8_t _wv2 = vmull_s8(_val, _w2);
int16x8_t _wv3 = vmull_s8(_val, _w3);
// Pairwise-add steps reduce each 8-wide product to 2 int16
// partial sums, then vpadalq folds them into int32 lanes.
int16x4_t _wv00 = vpadd_s16(vget_low_s16(_wv0), vget_high_s16(_wv0));
int16x4_t _wv11 = vpadd_s16(vget_low_s16(_wv1), vget_high_s16(_wv1));
int16x4_t _wv22 = vpadd_s16(vget_low_s16(_wv2), vget_high_s16(_wv2));
int16x4_t _wv33 = vpadd_s16(vget_low_s16(_wv3), vget_high_s16(_wv3));
_sum01 = vpadalq_s16(_sum01, vcombine_s16(_wv00, _wv11));
_sum23 = vpadalq_s16(_sum23, vcombine_s16(_wv22, _wv33));
kptr += 32;
}
}
// Final pairwise reduction: collapse the two partial sums per output
// lane into one int32 each, giving the 4-lane result for pixel (i,j).
int32x4_t _sum0 = vcombine_s32(vpadd_s32(vget_low_s32(_sum01), vget_high_s32(_sum01)), vpadd_s32(vget_low_s32(_sum23), vget_high_s32(_sum23)));
vst1q_s32(outptr + j * 4, _sum0);
}
outptr += outw * 4;
}
}
}
|
GB_unop__abs_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_int8_int8)
// op(A') function: GB (_unop_tran__abs_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = GB_IABS (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = GB_IABS (Ax [p]) over all anz entries, in parallel.
// Auto-generated kernel (see header: do not hand-edit the logic); the
// two loops differ only in whether a bitmap Ab gates which entries exist.
GrB_Info GB (_unop_apply__abs_int8_int8)
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every position 0..anz-1 holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// only positions with Ab [p] != 0 carry a value
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = |A'| : transpose A while applying the ABS operator.  The entire
// implementation lives in the shared template GB_unop_transpose.c, which
// uses the GB_CAST_OP macro defined above; this wrapper only supplies the
// type-specialized name and the GB_DISABLE compile-time opt-out.
GrB_Info GB (_unop_tran__abs_int8_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp50_taskdep_depobj.c | // RUN: %clang %openmp_flags %flags-use-compiler-omp-h %s -o %t && %libomp-run
// UNSUPPORTED: gcc-5, gcc-6, gcc-7, gcc-8
// UNSUPPORTED: clang-5, clang-6, clang-7, clang-8, clang-9, clang-10
// UNSUPPORTED: icc
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "omp_my_sleep.h"
int a, b;
// Body of each mutexinoutset-dependent task.  If the runtime honors the
// mutual exclusion implied by the mutexinoutset dependence, the global
// counter b goes 0 -> 1 -> 0 with no interleaving; any other value at the
// three checkpoints means two of these tasks overlapped, and the test
// aborts with a diagnostic on stderr.
void mutexinoutset_task() {
  // Checkpoint 1: no other instance may be mid-flight on entry.
  if (b != 0) {
    fprintf(stderr, "mutexinoutset_task: b != 0 at start of task\n");
    exit(EXIT_FAILURE);
  }
  ++b;
  // Checkpoint 2: our increment must be the only one visible.
  if (b != 1) {
    fprintf(stderr, "mutexinoutset_task: b != 1\n");
    exit(EXIT_FAILURE);
  }
  my_sleep(0.1); // widen the window so an overlap would be caught
  --b;
  // Checkpoint 3: counter restored before the task completes.
  if (b != 0) {
    fprintf(stderr, "mutexinoutset_task: b != 0 at end of task\n");
    exit(EXIT_FAILURE);
  }
}
// Driver for the OpenMP 5.0 depobj test: builds four depend objects on the
// same variable `a`, then submits tasks that depend on them.  Expected
// serialization: out-task sets a=10, inout-task makes it 11, the five
// mutexinoutset tasks run mutually exclusive (touching only b), and the
// final in-task adds 10, so a must be 21 at the join.
int main(int argc, char** argv) {
omp_depend_t dep_a_in;
omp_depend_t dep_a_out;
omp_depend_t dep_a_inout;
omp_depend_t dep_a_mutexinoutset;
a = 0;
b = 0;
// Materialize one depend object per dependence type, all on `a`.
#pragma omp depobj(dep_a_in) depend(in: a)
#pragma omp depobj(dep_a_out) depend(out: a)
#pragma omp depobj(dep_a_inout) depend(inout: a)
#pragma omp depobj(dep_a_mutexinoutset) depend(mutexinoutset: a)
#pragma omp parallel
{
// Single producer submits every task; the team executes them.
#pragma omp single
{
#pragma omp task depend(depobj: dep_a_out)
{
my_sleep(0.1);
a = 10;
}
#pragma omp task depend(depobj: dep_a_inout)
{
my_sleep(0.1);
a++;
}
// Five tasks with the same mutexinoutset dependence: any pair may
// reorder, but no two may overlap (checked inside the task body).
#pragma omp task depend(depobj: dep_a_mutexinoutset)
mutexinoutset_task();
#pragma omp task depend(depobj: dep_a_mutexinoutset)
mutexinoutset_task();
#pragma omp task depend(depobj: dep_a_mutexinoutset)
mutexinoutset_task();
#pragma omp task depend(depobj: dep_a_mutexinoutset)
mutexinoutset_task();
#pragma omp task depend(depobj: dep_a_mutexinoutset)
mutexinoutset_task();
#pragma omp task depend(depobj: dep_a_in)
{ a += 10; }
}
}
// All tasks complete at the implicit barrier: a must be 10 + 1 + 10.
if (a != 21) {
fprintf(stderr, "a (%d) != 21\n", a);
exit(EXIT_FAILURE);
}
// Release the depend objects created above.
#pragma omp depobj(dep_a_in) destroy
#pragma omp depobj(dep_a_out) destroy
#pragma omp depobj(dep_a_inout) destroy
#pragma omp depobj(dep_a_mutexinoutset) destroy
return EXIT_SUCCESS;
}
|
dyn.pluto.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define min(x,y) ((x) < (y) ? (x) : (y))
#define max(x,y) ((x) > (y) ? (x) : (y))
#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
#define S0(a, i, j, k) d[i][j] = c[i][k] + c[k][j]
void printMatrix(int**, int, int);
int** allocateMatrix(int);
void deallocateMatrix(int**, int);
void write_results_full(int , double , char );
void write_results(int , double );
// Pluto-generated, tiled + vectorized variant of the dynamic-programming
// kernel S0: d[i][j] = c[i][k] + c[k][j] over a triangular iteration space
// (see the S0 macro at the top of the file).  t1..t3 are tile-loop
// iterators, t4..t6 point-loop iterators; lbv/ubv bound the innermost
// vectorizable loop.  ceild/floord are the integer ceil/floor-division
// helpers defined above.  c is only read and d only written here, so the
// outermost tile loop (t1) is safe to run in parallel.  Do NOT hand-edit
// the loop bounds: they are machine-generated and mutually dependent.
void computeDYN0(int** matrix, int n) {
int** c = allocateMatrix(n + 1);
int** d = allocateMatrix(n + 1);
int i, j;
// Work on a private copy of the caller's matrix.
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
c[i][j] = matrix[i][j];
double start = omp_get_wtime();
int t1, t2, t3, t4, t5, t6;
// NOTE(review): lb, ub, lb2, ub2 are declared but never used (typical
// Pluto scaffolding); `register` is deprecated in modern C++ and a
// no-op hint in C.
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
if (n >= 3) {
lbp=0;
ubp=floord(n-1,19);
// Parallel over t1 tiles; all scalar iterators below must be private.
#pragma omp parallel for private(lbv,ubv,t2,t3,t4,t5,t6)
for (t1=lbp;t1<=ubp;t1++) {
for (t2=0;t2<=min(floord(n-2,25),floord(-19*t1+n,25));t2++) {
for (t3=max(max(ceild(19*t1-27,29),ceild(25*t2-26,29)),ceild(19*t1+25*t2-28,29));t3<=min(floord(n,29),floord(38*t1+25*t2+58,29));t3++) {
if ((t1 <= floord(-25*t2+29*t3-22,38)) && (t2 <= floord(29*t3-28,25))) {
if ((t2+t3)%2 == 0) {
S0(((-25*t2+29*t3-22)/2), (25*t2+24), 29*t3, ((-25*t2+29*t3-22)/2) + (25*t2+24) - 1);;
}
}
if ((t1 == 0) && (t2 >= ceild(29*t3-26,25))) {
for (t5=max(max(1,25*t2),29*t3-2);t5<=min(min(n-2,25*t2+24),29*t3+26);t5++) {
S0(2, t5, (t5+2), 2 + t5 - 1);;
}
}
for (t4=max(max(3,ceild(-25*t2+29*t3-21,2)),19*t1);t4<=min(min(min(floord(29*t3+1,2),floord(-25*t2+n-22,2)),floord(-25*t2+29*t3+2,2)),19*t1+18);t4++) {
S0(t4, (29*t3-2*t4+2), 29*t3, t4 + (29*t3-2*t4+2) - 1);;
for (t5=29*t3-2*t4+3;t5<=25*t2+24;t5++) {
lbv=max(29*t3,t4+t5);
ubv=2*t4+t5-3;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
S0(t4, t5, (2*t4+t5-2), t4 + t5 - 1);;
}
}
// Boundary tiles that touch the last column (29*t3 == n).
if (29*t3 == n) {
for (t4=max(max(3,ceild(-25*t2+n-21,2)),19*t1);t4<=min(min(floord(n+1,2),floord(-25*t2+n+2,2)),19*t1+18);t4++) {
if (n%29 == 0) {
S0(t4, (-2*t4+n+2), n, t4 + (-2*t4+n+2) - 1);;
}
for (t5=-2*t4+n+3;t5<=min(25*t2+24,-t4+n);t5++) {
if (n%29 == 0) {
S0(t4, t5, n, -t4 + n + 1);;
}
if (n%29 == 0) {
S0(t4, t5, n, t4 + t5 - 1);;
}
}
}
}
if ((t1 <= floord(29*t3+2,38)) && (t1 >= ceild(29*t3-34,38)) && (t2 == 0) && (t3 >= 2) && (t3 <= floord(n-24,29))) {
if (t3%2 == 0) {
for (t5=1;t5<=24;t5++) {
lbv=29*t3;
ubv=29*t3+t5-1;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(((29*t3+2)/2), t5, t6, -((29*t3+2)/2) + t6 + 1);;
S0(((29*t3+2)/2), t5, t6, ((29*t3+2)/2) + t5 - 1);;
}
S0(((29*t3+2)/2), t5, (29*t3+t5), ((29*t3+2)/2) + t5 - 1);;
}
}
}
if (t3 <= floord(n-1,29)) {
for (t4=max(max(3,ceild(-25*t2+n-21,2)),19*t1);t4<=min(min(floord(29*t3+1,2),floord(-25*t2+29*t3+2,2)),19*t1+18);t4++) {
S0(t4, (29*t3-2*t4+2), 29*t3, t4 + (29*t3-2*t4+2) - 1);;
for (t5=29*t3-2*t4+3;t5<=-2*t4+n+2;t5++) {
lbv=max(29*t3,t4+t5);
ubv=2*t4+t5-3;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
S0(t4, t5, (2*t4+t5-2), t4 + t5 - 1);;
}
for (t5=-2*t4+n+3;t5<=min(25*t2+24,-t4+n);t5++) {
lbv=max(29*t3,t4+t5);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
}
}
if (t3 <= floord(n-28,29)) {
for (t4=max(max(3,ceild(-25*t2+29*t3+3,2)),19*t1);t4<=min(floord(-25*t2+29*t3+6,2),19*t1+18);t4++) {
for (t5=max(1,25*t2);t5<=25*t2+24;t5++) {
lbv=max(29*t3,t4+t5);
ubv=2*t4+t5-3;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
S0(t4, t5, (2*t4+t5-2), t4 + t5 - 1);;
}
}
}
if (t3 >= ceild(n-27,29)) {
for (t4=max(max(3,ceild(-25*t2+29*t3+3,2)),19*t1);t4<=min(floord(-25*t2+n-22,2),19*t1+18);t4++) {
for (t5=max(1,25*t2);t5<=25*t2+24;t5++) {
lbv=max(29*t3,t4+t5);
ubv=2*t4+t5-3;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
S0(t4, t5, (2*t4+t5-2), t4 + t5 - 1);;
}
}
}
if ((t1 <= floord(29*t3+2,38)) && (t1 >= ceild(29*t3-34,38)) && (t2 == 0) && (t3 <= floord(n-1,29)) && (t3 >= max(2,ceild(n-23,29)))) {
if (t3%2 == 0) {
for (t5=1;t5<=-29*t3+n;t5++) {
lbv=29*t3;
ubv=29*t3+t5-1;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(((29*t3+2)/2), t5, t6, -((29*t3+2)/2) + t6 + 1);;
S0(((29*t3+2)/2), t5, t6, ((29*t3+2)/2) + t5 - 1);;
}
S0(((29*t3+2)/2), t5, (29*t3+t5), ((29*t3+2)/2) + t5 - 1);;
}
for (t5=-29*t3+n+1;t5<=24;t5++) {
lbv=29*t3;
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(((29*t3+2)/2), t5, t6, -((29*t3+2)/2) + t6 + 1);;
S0(((29*t3+2)/2), t5, t6, ((29*t3+2)/2) + t5 - 1);;
}
}
}
}
for (t4=max(max(max(3,ceild(-25*t2+n-21,2)),ceild(-25*t2+29*t3+3,2)),19*t1);t4<=min(min(min(floord(n+1,2),floord(-25*t2+n+2,2)),19*t1+18),29*t3-n+30);t4++) {
for (t5=max(1,25*t2);t5<=-2*t4+n+2;t5++) {
lbv=max(29*t3,t4+t5);
ubv=2*t4+t5-3;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
S0(t4, t5, (2*t4+t5-2), t4 + t5 - 1);;
}
for (t5=-2*t4+n+3;t5<=min(25*t2+24,-t4+n);t5++) {
lbv=max(29*t3,t4+t5);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
}
for (t4=max(max(max(ceild(-25*t2+n-21,2),ceild(-25*t2+29*t3+3,2)),19*t1),29*t3-n+31);t4<=min(min(min(floord(n+1,2),floord(-25*t2+n+2,2)),floord(-25*t2+29*t3+6,2)),19*t1+18);t4++) {
for (t5=max(1,25*t2);t5<=-2*t4+n+2;t5++) {
lbv=max(29*t3,t4+t5);
ubv=2*t4+t5-3;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
S0(t4, t5, (2*t4+t5-2), t4 + t5 - 1);;
}
for (t5=-2*t4+n+3;t5<=25*t2+24;t5++) {
lbv=max(29*t3,t4+t5);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
}
if (t3 <= floord(n-28,29)) {
for (t4=max(max(3,ceild(-25*t2+29*t3+7,2)),19*t1);t4<=min(min(floord(29*t3+29,2),floord(-25*t2+29*t3+30,2)),19*t1+18);t4++) {
for (t5=max(1,25*t2);t5<=29*t3-2*t4+30;t5++) {
lbv=max(29*t3,t4+t5);
ubv=2*t4+t5-3;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
S0(t4, t5, (2*t4+t5-2), t4 + t5 - 1);;
}
for (t5=29*t3-2*t4+31;t5<=min(25*t2+24,29*t3-t4+28);t5++) {
lbv=max(29*t3,t4+t5);
ubv=29*t3+28;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
}
}
if ((t1 <= floord(n+2,38)) && (t1 >= ceild(n-34,38)) && (t2 == 0) && (t3 >= ceild(3*n-58,58))) {
if (n%2 == 0) {
for (t5=1;t5<=min(24,floord(n-2,2));t5++) {
lbv=max(ceild(2*t5+n+2,2),29*t3);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(((n+2)/2), t5, t6, -((n+2)/2) + t6 + 1);;
S0(((n+2)/2), t5, t6, ((n+2)/2) + t5 - 1);;
}
}
}
}
if ((t1 <= floord(n+2,38)) && (t1 >= ceild(n-34,38)) && (t2 == 0) && (t3 <= floord(3*n-60,58)) && (t3 >= ceild(n-4,29))) {
if (n%2 == 0) {
for (t5=1;t5<=24;t5++) {
lbv=29*t3;
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(((n+2)/2), t5, t6, -((n+2)/2) + t6 + 1);;
S0(((n+2)/2), t5, t6, ((n+2)/2) + t5 - 1);;
}
}
}
}
if (t3 >= ceild(n-27,29)) {
for (t4=max(max(ceild(-25*t2+29*t3+7,2),19*t1),29*t3-n+31);t4<=min(min(floord(n+1,2),floord(-25*t2+n+2,2)),19*t1+18);t4++) {
for (t5=max(1,25*t2);t5<=-2*t4+n+2;t5++) {
lbv=max(29*t3,t4+t5);
ubv=2*t4+t5-3;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
S0(t4, t5, (2*t4+t5-2), t4 + t5 - 1);;
}
for (t5=-2*t4+n+3;t5<=29*t3-2*t4+30;t5++) {
lbv=max(29*t3,t4+t5);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
for (t5=29*t3-2*t4+31;t5<=min(25*t2+24,-t4+n);t5++) {
lbv=max(29*t3,t4+t5);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
}
}
for (t4=max(ceild(-25*t2+n+3,2),19*t1);t4<=min(min(min(n-1,19*t1+18),-25*t2+n),29*t3-n+30);t4++) {
for (t5=max(1,25*t2);t5<=min(25*t2+24,-t4+n);t5++) {
lbv=max(29*t3,t4+t5);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
}
for (t4=max(max(ceild(-25*t2+n+3,2),19*t1),29*t3-n+31);t4<=min(floord(-25*t2+29*t3+6,2),19*t1+18);t4++) {
for (t5=max(1,25*t2);t5<=25*t2+24;t5++) {
lbv=max(29*t3,t4+t5);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
}
if ((t1 <= floord(n+2,38)) && (t1 >= ceild(n-34,38)) && (t2 == 0) && (t3 <= min(floord(n-5,29),floord(3*n-60,58))) && (t3 >= ceild(n-27,29))) {
if (n%2 == 0) {
for (t5=1;t5<=29*t3-n+28;t5++) {
lbv=max(ceild(2*t5+n+2,2),29*t3);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(((n+2)/2), t5, t6, -((n+2)/2) + t6 + 1);;
S0(((n+2)/2), t5, t6, ((n+2)/2) + t5 - 1);;
}
}
for (t5=29*t3-n+29;t5<=min(24,floord(n-2,2));t5++) {
lbv=max(ceild(2*t5+n+2,2),29*t3);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(((n+2)/2), t5, t6, -((n+2)/2) + t6 + 1);;
S0(((n+2)/2), t5, t6, ((n+2)/2) + t5 - 1);;
}
}
}
}
for (t4=max(max(max(ceild(-25*t2+n+3,2),ceild(-25*t2+29*t3+7,2)),19*t1),29*t3-n+31);t4<=min(min(floord(29*t3+29,2),floord(-25*t2+29*t3+30,2)),19*t1+18);t4++) {
for (t5=max(1,25*t2);t5<=29*t3-2*t4+30;t5++) {
lbv=max(29*t3,t4+t5);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
for (t5=29*t3-2*t4+31;t5<=min(25*t2+24,-t4+n);t5++) {
lbv=max(29*t3,t4+t5);
ubv=n;
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
}
if ((t1 <= floord(29*t3+30,38)) && (t1 >= ceild(29*t3-6,38)) && (t2 == 0) && (t3 <= floord(2*n-32,29))) {
if (t3%2 == 0) {
for (t5=1;t5<=min(min(24,floord(29*t3+26,2)),floord(-29*t3+2*n-30,2));t5++) {
lbv=max(ceild(29*t3+2*t5+30,2),29*t3);
ubv=min(n,29*t3+28);
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(((29*t3+30)/2), t5, t6, -((29*t3+30)/2) + t6 + 1);;
S0(((29*t3+30)/2), t5, t6, ((29*t3+30)/2) + t5 - 1);;
}
}
}
}
for (t4=max(ceild(-25*t2+29*t3+31,2),19*t1);t4<=min(min(min(min(n-1,19*t1+18),-25*t2+n),29*t3+27),-25*t2+29*t3+28);t4++) {
for (t5=max(1,25*t2);t5<=min(min(25*t2+24,-t4+n),29*t3-t4+28);t5++) {
lbv=max(29*t3,t4+t5);
ubv=min(n,29*t3+28);
#pragma ivdep
#pragma vector always
for (t6=lbv;t6<=ubv;t6++) {
S0(t4, t5, t6, -t4 + t6 + 1);;
S0(t4, t5, t6, t4 + t5 - 1);;
}
}
}
}
}
}
}
// Report elapsed time and dump the result matrix d ("nontiled0").
double execution_time = omp_get_wtime() - start;
printf("normal: %lf\n", execution_time);
write_results(n, execution_time);
printMatrix(d, n, 0);
deallocateMatrix(c, n + 1);
deallocateMatrix(d, n + 1);
}
/* Straightforward (untiled) variant of the DP kernel: for each diagonal
 * distance c0 the inner c1 iterations are independent and are distributed
 * across threads.  S0 writes d[c0][c1] from read-only c (see S0 macro).
 *
 * BUG FIX: the original pragma was `#pragma omp parallel for private(c0)`.
 * Declaring c0 private gave every thread an *uninitialized* copy of c0,
 * which is then read both in the c1 loop bound (`n - c0`) and inside the
 * S0 body — undefined behavior / garbage results.  c0 is declared outside
 * the parallel construct and is only read inside it, so the correct
 * data-sharing attribute is the default `shared`; the clause is removed. */
void computeDYN1(int** matrix, int n) {
    int** c = allocateMatrix(n + 1);
    int** d = allocateMatrix(n + 1);
    int i, j;
    /* Work on a private copy of the caller's matrix. */
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            c[i][j] = matrix[i][j];
    double start = omp_get_wtime();
    for (int c0 = 2; c0 < n; c0 += 1)
#pragma omp parallel for
        for (int c1 = 1; c1 <= n - c0; c1 += 1)
            for (int c2 = c0 + c1; c2 <= min(n, 2 * c0 + c1 - 2); c2 += 1) {
                if (2 * c0 + c1 >= c2 + 3)
                    S0(c0, c1, c2, -c0 + c2 + 1);
                S0(c0, c1, c2, c0 + c1 - 1);
            }
    /* Report elapsed time and dump the result matrix d ("nontiled1"). */
    double execution_time = omp_get_wtime() - start;
    printf("parallel: %lf\n", execution_time);
    write_results(n, execution_time);
    printMatrix(d, n, 1);
    deallocateMatrix(c, n + 1);
    deallocateMatrix(d, n + 1);
}
// Tiled variant of the DP kernel with a fixed tile_size of 2: c0..c2 are
// tile loops, c3..c5 point loops, and the c4 loop is distributed across
// threads (iterations write distinct d rows/cells via S0).  The bound
// expressions are generated (isl/Pluto style); do not hand-edit them.
void computeDYN2(int** matrix, int n) {
int** c = allocateMatrix(n + 1);
int** d = allocateMatrix(n + 1);
int i, j;
// Work on a private copy of the caller's matrix.
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
c[i][j] = matrix[i][j];
double start = omp_get_wtime();
int tile_size = 2;
for (int c0 = 0; c0 < floord(n, tile_size); c0 += 1)
for (int c1 = 0; c1 < min(-c0 + n / tile_size, (n + 1) / tile_size - 1); c1 += 1)
for (int c2 = max(c0 + c1, c1 + 1); c2 <= min(tile_size * c0 + c1 + 1, (n + 1) / tile_size - 1); c2 += 1)
for (int c3 = max(tile_size * c0 + 1, -c1 + c2 + 1); c3 <= min(tile_size * c0 + 2, -tile_size * c1 + tile_size * c2 + 1); c3 += 1)
// Parallel point loop; c0..c3 are shared (read-only) inside.
#pragma omp parallel for
for (int c4 = tile_size * c1 + 1; c4 <= min(min(tile_size * c1 + 2, n - c3), tile_size * c2 - c3 + 2); c4 += 1)
for (int c5 = max(tile_size * c2 + 1, c3 + c4); c5 <= min(min(n, tile_size * c2 + 2), tile_size * c3 + c4 - 2); c5 += 1) {
if (tile_size * c3 + c4 >= c5 + 3)
S0(c3, c4, c5, -c3 + c5 + 1);
S0(c3, c4, c5, c3 + c4 - 1);
}
// Report elapsed time and dump the result matrix d ("nontiled2").
double execution_time = omp_get_wtime() - start;
printf("tiles: %lf\n", execution_time);
write_results_full(n, execution_time, '\n');
printMatrix(d, n, 2);
deallocateMatrix(c, n + 1);
deallocateMatrix(d, n + 1);
}
/* Write the top-left N x N corner of `matrix` to a file named
 * "nontiled<fileIndex>", one space-separated row per line.
 *
 * Fixes vs. the original:
 *  - `char filename[10]` overflowed for file indices >= 10
 *    ("nontiled" is 8 chars + digits + NUL); use snprintf with a
 *    buffer sized for any int.
 *  - guard against fopen failure instead of dereferencing NULL.
 *  - parameter renamed from `fileno`, which shadows the POSIX
 *    fileno() function declared in <stdio.h> on many systems. */
void printMatrix(int** matrix, int N, int fileIndex) {
    char filename[32];
    snprintf(filename, sizeof filename, "nontiled%d", fileIndex);
    FILE* f = fopen(filename, "wt");
    if (f == NULL)
        return; /* nothing we can do; keep the benchmark running */
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
            fprintf(f, "%d ", matrix[i][j]);
        fprintf(f, "\n");
    }
    fclose(f);
}
/* Allocate an N x N int matrix as an array of N row pointers.
 * Rows are malloc'd individually and left uninitialized; release the
 * result with deallocateMatrix(). */
int** allocateMatrix(int N) {
    int** rows = (int**)malloc(sizeof(int*) * N);
    for (int r = 0; r < N; r++)
        rows[r] = (int*)malloc(sizeof(int) * N);
    return rows;
}
/* Free a matrix created by allocateMatrix(): each of the N row buffers,
 * then the row-pointer array itself.  Free order is irrelevant, so rows
 * are released back-to-front here. */
void deallocateMatrix(int **t, int N) {
    for (int r = N - 1; r >= 0; r--)
        free(t[r]);
    free(t);
}
/* Append one "n:time" record, terminated by end_char, to results.txt.
 * The file is opened in append mode so repeated benchmark runs
 * accumulate their measurements. */
void write_results_full(int n, double execution_time, char end_char)
{
    FILE* out = fopen("results.txt", "at");
    fprintf(out, "%d:%lf%c", n, execution_time, end_char);
    fclose(out);
}
/* Convenience wrapper around write_results_full() that appends a record
 * with the default ';' separator. */
void write_results(int n, double execution_time)
{
    write_results_full(n, execution_time, ';');
}
/* Benchmark driver: builds a ZMAX x ZMAX adjacency-style matrix, then runs
 * the three DP variants for every N from 2000 up to (but excluding) ZMAX
 * in steps of 10.
 *
 * BUG FIX: allocateMatrix() returns uninitialized storage, but the
 * computeDYN* kernels copy and read every entry graph[i][j] for i,j < N.
 * Previously only the top-left 4x4 corner and the diagonal were seeded,
 * so the benchmark computed on indeterminate values (undefined behavior
 * and non-reproducible output).  Zero the whole matrix before seeding. */
int main(void) {
    const int ZMAX = 2010;
    int** graph = allocateMatrix(ZMAX);
    for (int i = 0; i < ZMAX; i++)
        for (int j = 0; j < ZMAX; j++)
            graph[i][j] = 0;
    int g[4][4] = { {1, 1, 0, 1}, {0, 1, 1, 0}, {0, 0, 1, 1}, {0, 0, 0, 1} };
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            graph[i][j] = g[i][j];
    for (int i = 0; i < ZMAX; i++)
        graph[i][i] = 1;
    int N = 2000;
    while (N < ZMAX)
    {
        //printMatrix(graph, 6, 9);
        computeDYN0(graph, N);
        computeDYN1(graph, N);
        computeDYN2(graph, N);
        N += 10;
    }
    deallocateMatrix(graph, ZMAX);
    return 0;
}
|
GB_binop__lxor_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int16)
// A*D function (colscale): GB (_AxD__lxor_int16)
// D*A function (rowscale): GB (_DxB__lxor_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int16)
// C=scalar+B GB (_bind1st__lxor_int16)
// C=scalar+B' GB (_bind1st_tran__lxor_int16)
// C=A+scalar GB (_bind2nd__lxor_int16)
// C=A'+scalar GB (_bind2nd_tran__lxor_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT16 || GxB_NO_LXOR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LXOR is not in that list, so this dense C += A+B kernel is compiled out
// by the generator and the function name is the placeholder ((none)).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense: the loop body lives in the
// shared template, specialized here via the GB_BINOP macro (logical xor
// of the int16 values treated as booleans).  Auto-generated; do not
// hand-edit the logic.
void GB (_Cdense_ewise3_noaccum__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: accumulate B's entries into C
// using the LXOR operator, with B pre-sliced into B_ntasks tasks for
// B_nthreads threads.  Implementation is the shared subassign template;
// auto-generated, do not hand-edit the logic.
GrB_Info GB (_Cdense_accumB__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed type-erased via
// p_bwork): every entry of C is combined with bwork using the LXOR
// operator through the shared subassign template.  Auto-generated;
// do not hand-edit the logic.
GrB_Info GB (_Cdense_accumb__lxor_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): this second return is unreachable (the block above
// always returns); it is a harmless quirk of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, scaling each column of A by the corresponding diagonal entry
// of D with the LXOR operator; A is pre-sliced into A_ntasks tasks.
// Loop body comes from the shared colscale template.  Auto-generated;
// do not hand-edit the logic.
GrB_Info GB (_AxD__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, scaling each row of B by the corresponding diagonal entry of
// D with the LXOR operator.  Loop body comes from the shared rowscale
// template.  Auto-generated; do not hand-edit the logic.
GrB_Info GB (_DxB__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Generated kernel for element-wise add (set union).  For eWiseUnion the
// alpha/beta scalars substitute for entries missing in A or B respectively.
GrB_Info GB (_AaddB__lxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
// unwrap the untyped scalars only when eWiseUnion actually needs them
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Generated kernel for element-wise multiply (set intersection) with a
// sparse/hypersparse result; the whole loop is in the included meta file.
GrB_Info GB (_AemultB_08__lxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel.  GB_BINOP_FLIP selects at compile time whether the
// flipxy argument must be honored at run time (non-commutative operators
// without a dedicated flipped variant) or can be ignored.
GrB_Info GB (_AemultB_02__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: iteration is driven by the sparse/hyper mask M, which is
// pre-sliced into M_ntasks tasks for M_nthreads threads.
GrB_Info GB (_AemultB_04__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel for element-wise multiply producing a bitmap result;
// the loop is in the included template.
GrB_Info GB (_AemultB_bitmap__lxor_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For every entry present in B (per the bitmap Bb), compute the logical-xor
// of the bound scalar x with that entry and store the result in Cx.
GrB_Info GB (_bind1st__lxor_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Bx = (int16_t *) Bx_input ;
const int16_t x = (*((int16_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only entries present in the bitmap are computed
if (GBB (Bb, k))
{
int16_t bij = GBX (Bx, k, false) ;
// z = (x != 0) XOR (bij != 0)
Cx [k] = ((x != 0) != (bij != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For every entry present in A (per the bitmap Ab), compute the logical-xor
// of that entry with the bound scalar y and store the result in Cx.
GrB_Info GB (_bind2nd__lxor_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
const int16_t y = (*((int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in the bitmap are computed
if (GBB (Ab, k))
{
int16_t aij = GBX (Ax, k, false) ;
// z = (aij != 0) XOR (y != 0)
Cx [k] = ((aij != 0) != (y != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c for each transposed entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// unwrap the bound scalar, then run the transpose template
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): redundant re-definition of GB_ATYPE emitted by the code
// generator; it has no effect on this function.
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c for each transposed entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// unwrap the bound scalar, then run the transpose template
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
_phonopy.c | /* Copyright (C) 2011 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <Python.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <numpy/arrayobject.h>
#include <dynmat.h>
#include <derivative_dynmat.h>
#include <kgrid.h>
#include <tetrahedron_method.h>
#define KB 8.6173382568083159E-05
/* PHPYCONST is defined in dynmat.h */
/* Build dynamical matrix */
static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args);
static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args);
static PyObject *
py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args);
static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args);
static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args);
static PyObject * py_get_dipole_dipole_q0(PyObject *self, PyObject *args);
static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args);
static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args);
static PyObject * py_distribute_fc2(PyObject *self, PyObject *args);
static PyObject * py_compute_permutation(PyObject *self, PyObject *args);
static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args);
static PyObject * py_gsv_set_smallest_vectors(PyObject *self, PyObject *args);
static PyObject *
py_thm_neighboring_grid_points(PyObject *self, PyObject *args);
static PyObject *
py_thm_relative_grid_address(PyObject *self, PyObject *args);
static PyObject *
py_thm_all_relative_grid_address(PyObject *self, PyObject *args);
static PyObject *
py_thm_integration_weight(PyObject *self, PyObject *args);
static PyObject *
py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args);
static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args);
static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args);
static void distribute_fc2(double (*fc2)[3][3],
const int * atom_list,
const int len_atom_list,
PHPYCONST double (*r_carts)[3][3],
const int * permutations,
const int * map_atoms,
const int * map_syms,
const int num_rot,
const int num_pos);
static int compute_permutation(int * rot_atom,
PHPYCONST double lat[3][3],
PHPYCONST double (*pos)[3],
PHPYCONST double (*rot_pos)[3],
const int num_pos,
const double symprec);
static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3],
int * multiplicity,
PHPYCONST double (*vector_lists)[27][3],
PHPYCONST double (*length_lists)[27],
const int num_lists,
const double symprec);
static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3],
int *multiplicity,
PHPYCONST double (*pos_to)[3],
const int num_pos_to,
PHPYCONST double (*pos_from)[3],
const int num_pos_from,
PHPYCONST int lattice_points[27][3],
PHPYCONST double reduced_basis[3][3],
PHPYCONST int trans_mat[3][3],
const double symprec);
static double get_free_energy(const double temperature,
const double f);
static double get_entropy(const double temperature,
const double f);
static double get_heat_capacity(const double temperature,
const double f);
static void set_index_permutation_symmetry_fc(double * fc,
const int natom);
static void set_translational_symmetry_fc(double * fc,
const int natom);
static void set_index_permutation_symmetry_compact_fc(double * fc,
const int p2s[],
const int s2pp[],
const int nsym_list[],
const int perms[],
const int n_satom,
const int n_patom,
const int is_transpose);
static void set_translational_symmetry_compact_fc(double * fc,
const int p2s[],
const int n_satom,
const int n_patom);
/* static double get_energy(double temperature, double f); */
static int nint(const double a);
/* Per-module state: holds the module's own exception object
   (created as "_phonopy.Error" during module init). */
struct module_state {
PyObject *error;
};
#if PY_MAJOR_VERSION >= 3
#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
#else
#define GETSTATE(m) (&_state)
static struct module_state _state;
#endif
/* Raise the module's own exception ("_phonopy.Error"); exposed to Python
   as error_out, mainly to exercise the error path. Always returns NULL. */
static PyObject *
error_out(PyObject *m) {
struct module_state *st = GETSTATE(m);
PyErr_SetString(st->error, "something bad happened");
return NULL;
}
/* Method table: maps the Python-level names of the _phonopy extension to
   the C wrapper functions declared above. */
static PyMethodDef _phonopy_methods[] = {
{"error_out", (PyCFunction)error_out, METH_NOARGS, NULL},
{"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS,
"Transform a set of dynmat to force constants"},
{"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS,
"Enforce permutation and translational symmetry of force constants"},
{"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc,
METH_VARARGS,
"Enforce permutation and translational symmetry of compact force constants"},
{"transpose_compact_fc", py_transpose_compact_fc,
METH_VARARGS,
"Transpose compact force constants"},
{"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS,
"Dynamical matrix"},
{"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS,
"NAC dynamical matrix"},
{"dipole_dipole", py_get_dipole_dipole, METH_VARARGS,
"Dipole-dipole interaction"},
{"dipole_dipole_q0", py_get_dipole_dipole_q0, METH_VARARGS,
"q=0 terms of Dipole-dipole interaction"},
{"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS,
"Q derivative of dynamical matrix"},
{"thermal_properties", py_get_thermal_properties, METH_VARARGS,
"Thermal properties"},
{"distribute_fc2", py_distribute_fc2,
METH_VARARGS,
"Distribute force constants for all atoms in atom_list using precomputed symmetry mappings."},
{"compute_permutation", py_compute_permutation, METH_VARARGS,
"Compute indices of original points in a set of rotated points."},
{"gsv_copy_smallest_vectors", py_gsv_copy_smallest_vectors, METH_VARARGS,
"Implementation detail of get_smallest_vectors."},
{"gsv_set_smallest_vectors", py_gsv_set_smallest_vectors, METH_VARARGS,
"Set candidate vectors."},
{"neighboring_grid_points", py_thm_neighboring_grid_points,
METH_VARARGS, "Neighboring grid points by relative grid addresses"},
{"tetrahedra_relative_grid_address", py_thm_relative_grid_address,
METH_VARARGS, "Relative grid addresses of vertices of 24 tetrahedra"},
{"all_tetrahedra_relative_grid_address",
py_thm_all_relative_grid_address, METH_VARARGS,
"4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"},
{"tetrahedra_integration_weight", py_thm_integration_weight,
METH_VARARGS, "Integration weight for tetrahedron method"},
{"tetrahedra_integration_weight_at_omegas",
py_thm_integration_weight_at_omegas,
METH_VARARGS, "Integration weight for tetrahedron method at omegas"},
/* NOTE(review): the C identifier is spelled "frequenies" (sic) but the
   Python-visible name below is spelled correctly; keep them paired. */
{"get_tetrahedra_frequencies", py_get_tetrahedra_frequenies,
METH_VARARGS, "Run tetrahedron method"},
{"tetrahedron_method_dos", py_tetrahedron_method_dos,
METH_VARARGS, "Run tetrahedron method"},
{NULL, NULL, 0, NULL}
};
#if PY_MAJOR_VERSION >= 3
/* GC traverse hook: visit the module-state exception object. */
static int _phonopy_traverse(PyObject *m, visitproc visit, void *arg) {
Py_VISIT(GETSTATE(m)->error);
return 0;
}
/* GC clear hook: drop the module-state exception object. */
static int _phonopy_clear(PyObject *m) {
Py_CLEAR(GETSTATE(m)->error);
return 0;
}
/* Module definition for Python 3; module state is struct module_state. */
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_phonopy",
NULL,
sizeof(struct module_state),
_phonopy_methods,
NULL,
_phonopy_traverse,
_phonopy_clear,
NULL
};
#define INITERROR return NULL
PyObject *
PyInit__phonopy(void)
#else
#define INITERROR return
void
init_phonopy(void)
#endif
/* Shared module-init body for both Python 2 and 3: create the module and
   install the "_phonopy.Error" exception into the module state. */
{
#if PY_MAJOR_VERSION >= 3
PyObject *module = PyModule_Create(&moduledef);
#else
PyObject *module = Py_InitModule("_phonopy", _phonopy_methods);
#endif
struct module_state *st;
if (module == NULL)
INITERROR;
st = GETSTATE(module);
st->error = PyErr_NewException("_phonopy.Error", NULL, NULL);
if (st->error == NULL) {
Py_DECREF(module);
INITERROR;
}
#if PY_MAJOR_VERSION >= 3
return module;
#endif
}
/* Python wrapper: build force constants from dynamical matrices at the
   commensurate q-points.  All arrays are numpy arrays modified/read in
   place; the heavy lifting is dym_transform_dynmat_to_fc.  Returns None. */
static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args)
{
PyArrayObject* py_force_constants;
PyArrayObject* py_dynamical_matrices;
PyArrayObject* py_commensurate_points;
PyArrayObject* py_shortest_vectors;
PyArrayObject* py_multiplicities;
PyArrayObject* py_masses;
PyArrayObject* py_s2pp_map;
PyArrayObject* py_fc_index_map;
double* fc;
double* dm;
double (*comm_points)[3];
double (*shortest_vectors)[27][3];
double* masses;
int* multiplicities;
int* s2pp_map;
int* fc_index_map;
int num_patom;
int num_satom;
if (!PyArg_ParseTuple(args, "OOOOOOOO",
&py_force_constants,
&py_dynamical_matrices,
&py_commensurate_points,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_s2pp_map,
&py_fc_index_map)) {
return NULL;
}
fc = (double*)PyArray_DATA(py_force_constants);
dm = (double*)PyArray_DATA(py_dynamical_matrices);
comm_points = (double(*)[3])PyArray_DATA(py_commensurate_points);
shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
masses = (double*)PyArray_DATA(py_masses);
multiplicities = (int*)PyArray_DATA(py_multiplicities);
s2pp_map = (int*)PyArray_DATA(py_s2pp_map);
fc_index_map = (int*)PyArray_DATA(py_fc_index_map);
/* sizes come from multiplicities: presumably shaped
   [num_satom, num_patom] — confirm against the Python caller */
num_patom = PyArray_DIMS(py_multiplicities)[1];
num_satom = PyArray_DIMS(py_multiplicities)[0];
dym_transform_dynmat_to_fc(fc,
dm,
comm_points,
shortest_vectors,
multiplicities,
masses,
s2pp_map,
fc_index_map,
num_patom,
num_satom);
Py_RETURN_NONE;
}
/* Python wrapper: find, for each rotated position, the index of the matching
   original position within tolerance symprec.  Results are written into the
   `permutation` array; returns an int flag from compute_permutation
   (is_found) as a Python int. */
static PyObject * py_compute_permutation(PyObject *self, PyObject *args)
{
PyArrayObject* permutation;
PyArrayObject* lattice;
PyArrayObject* positions;
PyArrayObject* permuted_positions;
double symprec;
int* rot_atoms;
double (*lat)[3];
double (*pos)[3];
double (*rot_pos)[3];
int num_pos;
int is_found;
if (!PyArg_ParseTuple(args, "OOOOd",
&permutation,
&lattice,
&positions,
&permuted_positions,
&symprec)) {
return NULL;
}
rot_atoms = (int*)PyArray_DATA(permutation);
lat = (double(*)[3])PyArray_DATA(lattice);
pos = (double(*)[3])PyArray_DATA(positions);
rot_pos = (double(*)[3])PyArray_DATA(permuted_positions);
num_pos = PyArray_DIMS(positions)[0];
is_found = compute_permutation(rot_atoms,
lat,
pos,
rot_pos,
num_pos,
symprec);
return Py_BuildValue("i", is_found);
}
/* Python wrapper: from candidate vectors/lengths, copy the shortest vectors
   (within symprec) into `shortest_vectors` and record how many in
   `multiplicity`.  Implementation detail of get_smallest_vectors. */
static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args)
{
PyArrayObject* py_shortest_vectors;
PyArrayObject* py_multiplicity;
PyArrayObject* py_vectors;
PyArrayObject* py_lengths;
double symprec;
double (*shortest_vectors)[27][3];
double (*vectors)[27][3];
double (*lengths)[27];
int * multiplicity;
int size_super, size_prim;
if (!PyArg_ParseTuple(args, "OOOOd",
&py_shortest_vectors,
&py_multiplicity,
&py_vectors,
&py_lengths,
&symprec)) {
return NULL;
}
shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
multiplicity = (int*)PyArray_DATA(py_multiplicity);
vectors = (double(*)[27][3])PyArray_DATA(py_vectors);
lengths = (double(*)[27])PyArray_DATA(py_lengths);
size_super = PyArray_DIMS(py_vectors)[0];
size_prim = PyArray_DIMS(py_vectors)[1];
/* the first two axes of py_vectors are flattened into one list count */
gsv_copy_smallest_vectors(shortest_vectors,
multiplicity,
vectors,
lengths,
size_super * size_prim,
symprec);
Py_RETURN_NONE;
}
/* Python wrapper: compute candidate shortest vectors between two position
   sets over the 27 neighboring lattice translations, in the reduced basis,
   and store them with per-pair multiplicities.  Returns None. */
static PyObject * py_gsv_set_smallest_vectors(PyObject *self, PyObject *args)
{
PyArrayObject* py_smallest_vectors;
PyArrayObject* py_multiplicity;
PyArrayObject* py_pos_to;
PyArrayObject* py_pos_from;
PyArrayObject* py_lattice_points;
PyArrayObject* py_reduced_basis;
PyArrayObject* py_trans_mat;
double symprec;
double (*smallest_vectors)[27][3];
int * multiplicity;
double (*pos_to)[3];
double (*pos_from)[3];
int (*lattice_points)[3];
double (*reduced_basis)[3];
int (*trans_mat)[3];
int num_pos_to, num_pos_from;
if (!PyArg_ParseTuple(args, "OOOOOOOd",
&py_smallest_vectors,
&py_multiplicity,
&py_pos_to,
&py_pos_from,
&py_lattice_points,
&py_reduced_basis,
&py_trans_mat,
&symprec)) {
return NULL;
}
smallest_vectors = (double(*)[27][3])PyArray_DATA(py_smallest_vectors);
multiplicity = (int*)PyArray_DATA(py_multiplicity);
pos_to = (double(*)[3])PyArray_DATA(py_pos_to);
pos_from = (double(*)[3])PyArray_DATA(py_pos_from);
num_pos_to = PyArray_DIMS(py_pos_to)[0];
num_pos_from = PyArray_DIMS(py_pos_from)[0];
lattice_points = (int(*)[3])PyArray_DATA(py_lattice_points);
reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis);
trans_mat = (int(*)[3])PyArray_DATA(py_trans_mat);
gsv_set_smallest_vectors(smallest_vectors,
multiplicity,
pos_to,
num_pos_to,
pos_from,
num_pos_from,
lattice_points,
reduced_basis,
trans_mat,
symprec);
Py_RETURN_NONE;
}
/* Python wrapper: iteratively enforce translational symmetry (zero drift
   along both atom indices) and index-permutation symmetry on a full
   [n_satom, n_satom, 3, 3] force-constant array, `level` times, then apply
   translational symmetry once more.  Modifies fc in place; returns None. */
static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args)
{
  PyArrayObject* force_constants;
  double *fc;
  double drift;
  int level;
  int n_satom;
  int pass, i, j, k, l;

  if (!PyArg_ParseTuple(args, "Oi", &force_constants, &level)) {
    return NULL;
  }

  fc = (double*)PyArray_DATA(force_constants);
  n_satom = PyArray_DIMS(force_constants)[0];

  for (pass = 0; pass < level; pass++) {
    /* Subtract drift along column */
    for (j = 0; j < n_satom; j++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          drift = 0;
          for (i = 0; i < n_satom; i++) {
            drift += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          drift /= n_satom;
          for (i = 0; i < n_satom; i++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= drift;
          }
        }
      }
    }
    /* Subtract drift along row */
    for (i = 0; i < n_satom; i++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          drift = 0;
          for (j = 0; j < n_satom; j++) {
            drift += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          drift /= n_satom;
          for (j = 0; j < n_satom; j++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= drift;
          }
        }
      }
    }
    /* Symmetrize with respect to exchanging the two atom indices. */
    set_index_permutation_symmetry_fc(fc, n_satom);
  }
  set_translational_symmetry_fc(fc, n_satom);
  Py_RETURN_NONE;
}
/* Python wrapper: enforce permutation and translational symmetry on a
   compact [n_patom, n_satom, 3, 3] force-constant array.  Each of the
   `level` passes alternates a transpose with row-drift removal (twice),
   then applies the full permutation symmetrization; translational symmetry
   is applied once at the end.  Modifies fc in place; returns None. */
static PyObject *
py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args)
{
PyArrayObject* py_fc;
PyArrayObject* py_permutations;
PyArrayObject* py_s2pp_map;
PyArrayObject* py_p2s_map;
PyArrayObject* py_nsym_list;
int level;
double *fc;
int *perms;
int *s2pp;
int *p2s;
int *nsym_list;
int n_patom, n_satom, i, j, k, l, n, iter;
double sum;
if (!PyArg_ParseTuple(args, "OOOOOi",
&py_fc,
&py_permutations,
&py_s2pp_map,
&py_p2s_map,
&py_nsym_list,
&level)) {
return NULL;
}
fc = (double*)PyArray_DATA(py_fc);
perms = (int*)PyArray_DATA(py_permutations);
s2pp = (int*)PyArray_DATA(py_s2pp_map);
p2s = (int*)PyArray_DATA(py_p2s_map);
nsym_list = (int*)PyArray_DATA(py_nsym_list);
n_patom = PyArray_DIMS(py_fc)[0];
n_satom = PyArray_DIMS(py_fc)[1];
for (iter=0; iter < level; iter++) {
/* two rounds: drift removal on fc and on its transpose */
for (n = 0; n < 2; n++) {
/* transpose only */
set_index_permutation_symmetry_compact_fc(fc,
p2s,
s2pp,
nsym_list,
perms,
n_satom,
n_patom,
1);
/* subtract the average over the second atom index (drift) */
for (i = 0; i < n_patom; i++) {
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
sum = 0;
for (j = 0; j < n_satom; j++) {
sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
}
sum /= n_satom;
for (j = 0; j < n_satom; j++) {
fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
}
}
}
}
}
/* full symmetrization (not just transpose) */
set_index_permutation_symmetry_compact_fc(fc,
p2s,
s2pp,
nsym_list,
perms,
n_satom,
n_patom,
0);
}
set_translational_symmetry_compact_fc(fc, p2s, n_satom, n_patom);
Py_RETURN_NONE;
}
/* Python wrapper: transpose a compact force-constant array in place using
   the precomputed symmetry mappings (is_transpose=1).  Returns None. */
static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args)
{
PyArrayObject* py_fc;
PyArrayObject* py_permutations;
PyArrayObject* py_s2pp_map;
PyArrayObject* py_p2s_map;
PyArrayObject* py_nsym_list;
double *fc;
int *s2pp;
int *p2s;
int *nsym_list;
int *perms;
int n_patom, n_satom;
if (!PyArg_ParseTuple(args, "OOOOO",
&py_fc,
&py_permutations,
&py_s2pp_map,
&py_p2s_map,
&py_nsym_list)) {
return NULL;
}
fc = (double*)PyArray_DATA(py_fc);
perms = (int*)PyArray_DATA(py_permutations);
s2pp = (int*)PyArray_DATA(py_s2pp_map);
p2s = (int*)PyArray_DATA(py_p2s_map);
nsym_list = (int*)PyArray_DATA(py_nsym_list);
n_patom = PyArray_DIMS(py_fc)[0];
n_satom = PyArray_DIMS(py_fc)[1];
set_index_permutation_symmetry_compact_fc(fc,
p2s,
s2pp,
nsym_list,
perms,
n_satom,
n_patom,
1);
Py_RETURN_NONE;
}
/* Python wrapper: build the dynamical matrix at q (no non-analytical
   correction: charge_sum is NULL).  Writes into the preallocated dm array;
   returns None. */
static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args)
{
PyArrayObject* py_dynamical_matrix;
PyArrayObject* py_force_constants;
PyArrayObject* py_shortest_vectors;
PyArrayObject* py_q;
PyArrayObject* py_multiplicities;
PyArrayObject* py_masses;
PyArrayObject* py_s2p_map;
PyArrayObject* py_p2s_map;
double* dm;
double* fc;
double* q;
double (*svecs)[27][3];
double* m;
int* multi;
int* s2p_map;
int* p2s_map;
int num_patom;
int num_satom;
if (!PyArg_ParseTuple(args, "OOOOOOOO",
&py_dynamical_matrix,
&py_force_constants,
&py_q,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_s2p_map,
&py_p2s_map)) {
return NULL;
}
dm = (double*)PyArray_DATA(py_dynamical_matrix);
fc = (double*)PyArray_DATA(py_force_constants);
q = (double*)PyArray_DATA(py_q);
svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
m = (double*)PyArray_DATA(py_masses);
multi = (int*)PyArray_DATA(py_multiplicities);
s2p_map = (int*)PyArray_DATA(py_s2p_map);
p2s_map = (int*)PyArray_DATA(py_p2s_map);
num_patom = PyArray_DIMS(py_p2s_map)[0];
num_satom = PyArray_DIMS(py_s2p_map)[0];
/* last two args: charge_sum = NULL (no NAC), trailing flag = 1
   (semantics defined in dynmat.h — confirm there) */
dym_get_dynamical_matrix_at_q(dm,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
m,
s2p_map,
p2s_map,
NULL,
1);
Py_RETURN_NONE;
}
/* Python wrapper: build the dynamical matrix at q with the non-analytical
   term correction (NAC).  A temporary charge_sum array of num_patom^2 3x3
   blocks is built from the Born effective charges, used by
   dym_get_dynamical_matrix_at_q, and freed before returning.
   Returns None, or NULL (with a Python exception set) on error. */
static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject* py_dynamical_matrix;
  PyArrayObject* py_force_constants;
  PyArrayObject* py_shortest_vectors;
  PyArrayObject* py_q_cart;
  PyArrayObject* py_q;
  PyArrayObject* py_multiplicities;
  PyArrayObject* py_masses;
  PyArrayObject* py_s2p_map;
  PyArrayObject* py_p2s_map;
  PyArrayObject* py_born;
  double factor;
  double* dm;
  double* fc;
  double* q_cart;
  double* q;
  double (*svecs)[27][3];
  double* m;
  double (*born)[3][3];
  int* multi;
  int* s2p_map;
  int* p2s_map;
  int num_patom;
  int num_satom;
  int n;
  double (*charge_sum)[3][3];

  if (!PyArg_ParseTuple(args, "OOOOOOOOOOd",
                        &py_dynamical_matrix,
                        &py_force_constants,
                        &py_q,
                        &py_shortest_vectors,
                        &py_multiplicities,
                        &py_masses,
                        &py_s2p_map,
                        &py_p2s_map,
                        &py_q_cart,
                        &py_born,
                        &factor))
    return NULL;

  dm = (double*)PyArray_DATA(py_dynamical_matrix);
  fc = (double*)PyArray_DATA(py_force_constants);
  q_cart = (double*)PyArray_DATA(py_q_cart);
  q = (double*)PyArray_DATA(py_q);
  svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
  m = (double*)PyArray_DATA(py_masses);
  born = (double(*)[3][3])PyArray_DATA(py_born);
  multi = (int*)PyArray_DATA(py_multiplicities);
  s2p_map = (int*)PyArray_DATA(py_s2p_map);
  p2s_map = (int*)PyArray_DATA(py_p2s_map);
  num_patom = PyArray_DIMS(py_p2s_map)[0];
  num_satom = PyArray_DIMS(py_s2p_map)[0];

  charge_sum = (double(*)[3][3])
    malloc(sizeof(double[3][3]) * num_patom * num_patom);
  if (charge_sum == NULL) {
    /* fix: the allocation was previously unchecked — a failed malloc would
       have been dereferenced inside dym_get_charge_sum */
    return PyErr_NoMemory();
  }

  /* number of supercell images per primitive atom; scales the NAC factor */
  n = num_satom / num_patom;
  dym_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born);
  dym_get_dynamical_matrix_at_q(dm,
                                num_patom,
                                num_satom,
                                fc,
                                q,
                                svecs,
                                multi,
                                m,
                                s2p_map,
                                p2s_map,
                                charge_sum,
                                1);
  free(charge_sum);
  Py_RETURN_NONE;
}
/* Python wrapper: Ewald-type dipole-dipole interaction term.  py_q_direction
   may be Python None, in which case a NULL pointer is passed through.
   Writes into the preallocated dd array; returns None. */
static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args)
{
PyArrayObject* py_dd;
PyArrayObject* py_dd_q0;
PyArrayObject* py_G_list;
PyArrayObject* py_q_cart;
PyArrayObject* py_q_direction;
PyArrayObject* py_born;
PyArrayObject* py_dielectric;
PyArrayObject* py_positions;
double factor;
double lambda;
double tolerance;
double* dd;
double* dd_q0;
double (*G_list)[3];
double* q_vector;
double* q_direction;
double (*born)[3][3];
double (*dielectric)[3];
double (*pos)[3];
int num_patom, num_G;
if (!PyArg_ParseTuple(args, "OOOOOOOOddd",
&py_dd,
&py_dd_q0,
&py_G_list,
&py_q_cart,
&py_q_direction,
&py_born,
&py_dielectric,
&py_positions,
&factor,
&lambda,
&tolerance))
return NULL;
dd = (double*)PyArray_DATA(py_dd);
dd_q0 = (double*)PyArray_DATA(py_dd_q0);
G_list = (double(*)[3])PyArray_DATA(py_G_list);
/* q_direction is optional: Python None maps to NULL */
if ((PyObject*)py_q_direction == Py_None) {
q_direction = NULL;
} else {
q_direction = (double*)PyArray_DATA(py_q_direction);
}
q_vector = (double*)PyArray_DATA(py_q_cart);
born = (double(*)[3][3])PyArray_DATA(py_born);
dielectric = (double(*)[3])PyArray_DATA(py_dielectric);
pos = (double(*)[3])PyArray_DATA(py_positions);
num_G = PyArray_DIMS(py_G_list)[0];
num_patom = PyArray_DIMS(py_positions)[0];
dym_get_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */
dd_q0, /* [natom, 3, 3, (real, imag)] */
G_list, /* [num_kvec, 3] */
num_G,
num_patom,
q_vector,
q_direction,
born,
dielectric,
pos, /* [natom, 3] */
factor, /* 4pi/V*unit-conv */
lambda, /* 4 * Lambda^2 */
tolerance);
Py_RETURN_NONE;
}
/* Python wrapper: q=0 part of the dipole-dipole interaction term.
   Writes into the preallocated dd_q0 array; returns None. */
static PyObject * py_get_dipole_dipole_q0(PyObject *self, PyObject *args)
{
PyArrayObject* py_dd_q0;
PyArrayObject* py_G_list;
PyArrayObject* py_born;
PyArrayObject* py_dielectric;
PyArrayObject* py_positions;
double lambda;
double tolerance;
double* dd_q0;
double (*G_list)[3];
double (*born)[3][3];
double (*dielectric)[3];
double (*pos)[3];
int num_patom, num_G;
if (!PyArg_ParseTuple(args, "OOOOOdd",
&py_dd_q0,
&py_G_list,
&py_born,
&py_dielectric,
&py_positions,
&lambda,
&tolerance))
return NULL;
dd_q0 = (double*)PyArray_DATA(py_dd_q0);
G_list = (double(*)[3])PyArray_DATA(py_G_list);
born = (double(*)[3][3])PyArray_DATA(py_born);
dielectric = (double(*)[3])PyArray_DATA(py_dielectric);
pos = (double(*)[3])PyArray_DATA(py_positions);
num_G = PyArray_DIMS(py_G_list)[0];
num_patom = PyArray_DIMS(py_positions)[0];
dym_get_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */
G_list, /* [num_kvec, 3] */
num_G,
num_patom,
born,
dielectric,
pos, /* [natom, 3] */
lambda, /* 4 * Lambda^2 */
tolerance);
Py_RETURN_NONE;
}
/* Python wrapper: derivative of the dynamical matrix with respect to q.
   py_born, dielectric and q_direction are each optional (Python None maps
   to a NULL pointer).  Writes into the preallocated ddm array; returns
   None. */
static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args)
{
PyArrayObject* derivative_dynmat;
PyArrayObject* py_force_constants;
PyArrayObject* r_vector;
PyArrayObject* lattice;
PyArrayObject* q_vector;
PyArrayObject* py_multiplicities;
PyArrayObject* py_masses;
PyArrayObject* py_s2p_map;
PyArrayObject* py_p2s_map;
PyArrayObject* py_born;
PyArrayObject* dielectric;
PyArrayObject* q_direction;
double nac_factor;
double* ddm;
double* fc;
double* q;
double* lat;
double* r;
double* m;
int* multi;
int* s2p_map;
int* p2s_map;
int num_patom;
int num_satom;
double *z;
double *epsilon;
double *q_dir;
if (!PyArg_ParseTuple(args, "OOOOOOOOOdOOO",
&derivative_dynmat,
&py_force_constants,
&q_vector,
&lattice, /* column vectors */
&r_vector,
&py_multiplicities,
&py_masses,
&py_s2p_map,
&py_p2s_map,
&nac_factor,
&py_born,
&dielectric,
&q_direction)) {
return NULL;
}
ddm = (double*)PyArray_DATA(derivative_dynmat);
fc = (double*)PyArray_DATA(py_force_constants);
q = (double*)PyArray_DATA(q_vector);
lat = (double*)PyArray_DATA(lattice);
r = (double*)PyArray_DATA(r_vector);
m = (double*)PyArray_DATA(py_masses);
multi = (int*)PyArray_DATA(py_multiplicities);
s2p_map = (int*)PyArray_DATA(py_s2p_map);
p2s_map = (int*)PyArray_DATA(py_p2s_map);
num_patom = PyArray_DIMS(py_p2s_map)[0];
num_satom = PyArray_DIMS(py_s2p_map)[0];
/* the three NAC-related inputs are optional: None maps to NULL */
if ((PyObject*)py_born == Py_None) {
z = NULL;
} else {
z = (double*)PyArray_DATA(py_born);
}
if ((PyObject*)dielectric == Py_None) {
epsilon = NULL;
} else {
epsilon = (double*)PyArray_DATA(dielectric);
}
if ((PyObject*)q_direction == Py_None) {
q_dir = NULL;
} else {
q_dir = (double*)PyArray_DATA(q_direction);
}
get_derivative_dynmat_at_q(ddm,
num_patom,
num_satom,
fc,
q,
lat,
r,
multi,
m,
s2p_map,
p2s_map,
nac_factor,
z,
epsilon,
q_dir);
Py_RETURN_NONE;
}
/* Thermal properties */
/* Python wrapper: accumulate harmonic thermal properties over q-points.
   thermal_props (out) receives, for each temperature, the weighted sums of
   free energy, entropy and heat capacity; layout is [num_temp][3].
   Args: thermal_props (out), temperatures, frequencies [nq][nband],
   weights [nq] (int), cutoff_frequency (double). Modes with f <= cutoff
   or T <= 0 are skipped. Returns None, or NULL on parse/alloc failure.
   Fixes vs. previous revision: removed unused `sum_weights` local and
   added a NULL check on the scratch-buffer malloc. */
static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args)
{
PyArrayObject* py_thermal_props;
PyArrayObject* py_temperatures;
PyArrayObject* py_frequencies;
PyArrayObject* py_weights;
double cutoff_frequency;
double *temperatures;
double* freqs;
double *thermal_props;
int* w;
int num_qpoints;
int num_bands;
int num_temp;
int i, j, k;
double f;
double *tp;
if (!PyArg_ParseTuple(args, "OOOOd",
&py_thermal_props,
&py_temperatures,
&py_frequencies,
&py_weights,
&cutoff_frequency)) {
return NULL;
}
thermal_props = (double*)PyArray_DATA(py_thermal_props);
temperatures = (double*)PyArray_DATA(py_temperatures);
num_temp = PyArray_DIMS(py_temperatures)[0];
freqs = (double*)PyArray_DATA(py_frequencies);
num_qpoints = PyArray_DIMS(py_frequencies)[0];
w = (int*)PyArray_DATA(py_weights);
num_bands = PyArray_DIMS(py_frequencies)[1];
/* Per-q-point scratch [nq][num_temp][3]; lets the OpenMP loop below
   write without races, reduced serially afterwards. */
tp = (double*)malloc(sizeof(double) * num_qpoints * num_temp * 3);
if (tp == NULL) {
return PyErr_NoMemory();
}
for (i = 0; i < num_qpoints * num_temp * 3; i++) {
tp[i] = 0;
}
#pragma omp parallel for private(j, k, f)
for (i = 0; i < num_qpoints; i++){
for (j = 0; j < num_temp; j++) {
for (k = 0; k < num_bands; k++){
f = freqs[i * num_bands + k];
if (temperatures[j] > 0 && f > cutoff_frequency) {
tp[i * num_temp * 3 + j * 3] +=
get_free_energy(temperatures[j], f) * w[i];
tp[i * num_temp * 3 + j * 3 + 1] +=
get_entropy(temperatures[j], f) * w[i];
tp[i * num_temp * 3 + j * 3 + 2] +=
get_heat_capacity(temperatures[j], f) * w[i];
}
}
}
}
/* Serial reduction over q-points into the caller's output array. */
for (i = 0; i < num_qpoints; i++) {
for (j = 0; j < num_temp * 3; j++) {
thermal_props[j] += tp[i * num_temp * 3 + j];
}
}
free(tp);
tp = NULL;
Py_RETURN_NONE;
}
/* Python wrapper: distribute symmetry-reduced fc2 elements to all atoms.
   Args: force_constants [n_pos][n_pos][3][3] (in/out), atom_list,
   rotations_cart [n_rot][3][3], permutations [n_rot][n_pos],
   map_atoms [n_pos], map_syms [n_pos].
   Validates that map arrays and rotations are consistent with the
   permutation table shape; raises ValueError otherwise. Returns None. */
static PyObject * py_distribute_fc2(PyObject *self, PyObject *args)
{
PyArrayObject* py_force_constants;
PyArrayObject* py_permutations;
PyArrayObject* py_map_atoms;
PyArrayObject* py_map_syms;
PyArrayObject* py_atom_list;
PyArrayObject* py_rotations_cart;
double (*r_carts)[3][3];
double (*fc2)[3][3];
int *permutations;
int *map_atoms;
int *map_syms;
int *atom_list;
npy_intp num_pos, num_rot, len_atom_list;
if (!PyArg_ParseTuple(args, "OOOOOO",
&py_force_constants,
&py_atom_list,
&py_rotations_cart,
&py_permutations,
&py_map_atoms,
&py_map_syms)) {
return NULL;
}
/* Borrow raw data pointers; shapes are taken from the permutation table. */
fc2 = (double(*)[3][3])PyArray_DATA(py_force_constants);
atom_list = (int*)PyArray_DATA(py_atom_list);
len_atom_list = PyArray_DIMS(py_atom_list)[0];
permutations = (int*)PyArray_DATA(py_permutations);
map_atoms = (int*)PyArray_DATA(py_map_atoms);
map_syms = (int*)PyArray_DATA(py_map_syms);
r_carts = (double(*)[3][3])PyArray_DATA(py_rotations_cart);
num_rot = PyArray_DIMS(py_permutations)[0];
num_pos = PyArray_DIMS(py_permutations)[1];
/* Shape sanity checks; mismatches would cause out-of-bounds reads below. */
if (PyArray_NDIM(py_map_atoms) != 1 || PyArray_DIMS(py_map_atoms)[0] != num_pos)
{
PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms");
return NULL;
}
if (PyArray_NDIM(py_map_syms) != 1 || PyArray_DIMS(py_map_syms)[0] != num_pos)
{
PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms");
return NULL;
}
if (PyArray_DIMS(py_rotations_cart)[0] != num_rot)
{
PyErr_SetString(PyExc_ValueError, "permutations and rotations are different length");
return NULL;
}
distribute_fc2(fc2,
atom_list,
len_atom_list,
r_carts,
permutations,
map_atoms,
map_syms,
num_rot,
num_pos);
Py_RETURN_NONE;
}
/* Python wrapper: find BZ grid-point indices of the neighbors of one grid
   point, given relative grid addresses (tetrahedron-method stencil).
   Args: relative_grid_points (out), grid_point (int),
   relative_grid_address [n][3], mesh [3], bz_grid_address, bz_map.
   Returns None. */
static PyObject *py_thm_neighboring_grid_points(PyObject *self, PyObject *args)
{
PyArrayObject* py_relative_grid_points;
PyArrayObject* py_relative_grid_address;
PyArrayObject* py_mesh;
PyArrayObject* py_bz_grid_address;
PyArrayObject* py_bz_map;
int grid_point;
int* relative_grid_points;
int (*relative_grid_address)[3];
int num_relative_grid_address;
int *mesh;
int (*bz_grid_address)[3];
int *bz_map;
if (!PyArg_ParseTuple(args, "OiOOOO",
&py_relative_grid_points,
&grid_point,
&py_relative_grid_address,
&py_mesh,
&py_bz_grid_address,
&py_bz_map)) {
return NULL;
}
relative_grid_points = (int*)PyArray_DATA(py_relative_grid_points);
relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address);
num_relative_grid_address = PyArray_DIMS(py_relative_grid_address)[0];
mesh = (int*)PyArray_DATA(py_mesh);
bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
bz_map = (int*)PyArray_DATA(py_bz_map);
thm_get_neighboring_grid_points(relative_grid_points,
grid_point,
relative_grid_address,
num_relative_grid_address,
mesh,
bz_grid_address,
bz_map);
Py_RETURN_NONE;
}
/* Python wrapper: fill the tetrahedron relative grid addresses
   (out array shaped [24][4][3]) chosen for the given reciprocal lattice
   (column vectors). Returns None. */
static PyObject *
py_thm_relative_grid_address(PyObject *self, PyObject *args)
{
PyArrayObject* py_relative_grid_address;
PyArrayObject* py_reciprocal_lattice_py;
int (*relative_grid_address)[4][3];
double (*reciprocal_lattice)[3];
if (!PyArg_ParseTuple(args, "OO",
&py_relative_grid_address,
&py_reciprocal_lattice_py)) {
return NULL;
}
relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice_py);
thm_get_relative_grid_address(relative_grid_address, reciprocal_lattice);
Py_RETURN_NONE;
}
/* Python wrapper: fill all 4 x 24 tetrahedron relative grid addresses
   (out array shaped [4][24][4][3]) independent of the lattice.
   Returns None. */
static PyObject *
py_thm_all_relative_grid_address(PyObject *self, PyObject *args)
{
PyArrayObject* py_relative_grid_address;
int (*relative_grid_address)[24][4][3];
if (!PyArg_ParseTuple(args, "O",
&py_relative_grid_address)) {
return NULL;
}
relative_grid_address =
(int(*)[24][4][3])PyArray_DATA(py_relative_grid_address);
thm_get_all_relative_grid_address(relative_grid_address);
Py_RETURN_NONE;
}
/* Python wrapper: tetrahedron-method integration weight at frequency
   `omega` for one set of 24 tetrahedra vertex frequencies [24][4].
   Only the first character of `function` is used to select the weighting
   function — presumably 'I' (DOS-like) vs 'J'; confirm against
   thm_get_integration_weight. Returns the weight as a Python float. */
static PyObject *
py_thm_integration_weight(PyObject *self, PyObject *args)
{
double omega;
PyArrayObject* py_tetrahedra_omegas;
char* function;
double (*tetrahedra_omegas)[4];
double iw;
if (!PyArg_ParseTuple(args, "dOs",
&omega,
&py_tetrahedra_omegas,
&function)) {
return NULL;
}
tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas);
iw = thm_get_integration_weight(omega,
tetrahedra_omegas,
function[0]);
return PyFloat_FromDouble(iw);
}
/* Python wrapper: vectorized variant of py_thm_integration_weight —
   computes integration weights for every frequency in `omegas`,
   writing into integration_weights (out, same length). Returns None. */
static PyObject *
py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args)
{
PyArrayObject* py_integration_weights;
PyArrayObject* py_omegas;
PyArrayObject* py_tetrahedra_omegas;
char* function;
double *omegas;
double *iw;
int num_omegas;
double (*tetrahedra_omegas)[4];
if (!PyArg_ParseTuple(args, "OOOs",
&py_integration_weights,
&py_omegas,
&py_tetrahedra_omegas,
&function)) {
return NULL;
}
omegas = (double*)PyArray_DATA(py_omegas);
iw = (double*)PyArray_DATA(py_integration_weights);
num_omegas = (int)PyArray_DIMS(py_omegas)[0];
tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas);
thm_get_integration_weight_at_omegas(iw,
num_omegas,
omegas,
tetrahedra_omegas,
function[0]);
Py_RETURN_NONE;
}
/* Python wrapper: gather, for each requested grid point, the frequencies at
   the 96 (= 24 tetrahedra x 4 vertices) neighboring grid points for every
   band, into freq_tetras [num_gp_in][num_band * 96].
   NOTE(review): "frequenies" is a typo, but the name is the Python-visible
   binding and cannot be changed without breaking callers. */
static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args)
{
PyArrayObject* py_freq_tetras;
PyArrayObject* py_grid_points;
PyArrayObject* py_mesh;
PyArrayObject* py_grid_address;
PyArrayObject* py_gp_ir_index;
PyArrayObject* py_relative_grid_address;
PyArrayObject* py_frequencies;
double* freq_tetras;
int* grid_points;
int num_gp_in;
int* mesh;
int (*grid_address)[3];
int* gp_ir_index;
int (*relative_grid_address)[3];
double* frequencies;
int num_band;
int is_shift[3] = {0, 0, 0};
int i, j, k, gp;
int g_addr[3];
int address_double[3];
if (!PyArg_ParseTuple(args, "OOOOOOO",
&py_freq_tetras,
&py_grid_points,
&py_mesh,
&py_grid_address,
&py_gp_ir_index,
&py_relative_grid_address,
&py_frequencies)) {
return NULL;
}
freq_tetras = (double*)PyArray_DATA(py_freq_tetras);
grid_points = (int*)PyArray_DATA(py_grid_points);
num_gp_in = (int)PyArray_DIMS(py_grid_points)[0];
mesh = (int*)PyArray_DATA(py_mesh);
grid_address = (int(*)[3])PyArray_DATA(py_grid_address);
gp_ir_index = (int*)PyArray_DATA(py_gp_ir_index);
relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address);
frequencies = (double*)PyArray_DATA(py_frequencies);
num_band = (int)PyArray_DIMS(py_frequencies)[1];
/* Outer loop serial over requested grid points; inner loop parallel over
   band x 96 vertices. j % 96 selects the stencil vertex, j / 96 the band. */
for (i = 0; i < num_gp_in; i++) {
#pragma omp parallel for private(k, g_addr, gp, address_double)
for (j = 0; j < num_band * 96; j++) {
for (k = 0; k < 3; k++) {
g_addr[k] = grid_address[grid_points[i]][k] +
relative_grid_address[j % 96][k];
}
kgd_get_grid_address_double_mesh(address_double,
g_addr,
mesh,
is_shift);
gp = kgd_get_grid_point_double_mesh(address_double, mesh);
/* Map the neighbor grid point to its irreducible representative. */
freq_tetras[i * num_band * 96 + j] =
frequencies[gp_ir_index[gp] * num_band + j / 96];
}
}
Py_RETURN_NONE;
}
/* Python wrapper: compute a (projected) density of states with the
   tetrahedron method over an irreducible set of grid points.
   dos (out) has layout [num_ir_gp][num_band][num_freq_points][num_coef].
   grid_mapping_table maps every full-mesh grid point to its irreducible
   representative (a point is irreducible iff it maps to itself).
   NOTE(review): the three mallocs below are not NULL-checked, and an
   inconsistent num_ir_gp only triggers a printf instead of an error. */
static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args)
{
PyArrayObject* py_dos;
PyArrayObject* py_mesh;
PyArrayObject* py_freq_points;
PyArrayObject* py_frequencies;
PyArrayObject* py_coef;
PyArrayObject* py_grid_address;
PyArrayObject* py_grid_mapping_table;
PyArrayObject* py_relative_grid_address;
double *dos;
int* mesh;
double* freq_points;
int num_freq_points;
double* frequencies;
double* coef;
int (*grid_address)[3];
int num_gp;
int num_ir_gp;
int num_coef;
int num_band;
int* grid_mapping_table;
int (*relative_grid_address)[4][3];
int is_shift[3] = {0, 0, 0};
int i, j, k, l, m, q, r, count;
int g_addr[3];
int ir_gps[24][4];
double tetrahedra[24][4];
int address_double[3];
int *gp2ir, *ir_grid_points, *weights;
double iw;
gp2ir = NULL;
ir_grid_points = NULL;
weights = NULL;
if (!PyArg_ParseTuple(args, "OOOOOOOO",
&py_dos,
&py_mesh,
&py_freq_points,
&py_frequencies,
&py_coef,
&py_grid_address,
&py_grid_mapping_table,
&py_relative_grid_address)) {
return NULL;
}
/* dos[num_ir_gp][num_band][num_freq_points][num_coef] */
dos = (double*)PyArray_DATA(py_dos);
mesh = (int*)PyArray_DATA(py_mesh);
freq_points = (double*)PyArray_DATA(py_freq_points);
num_freq_points = (int)PyArray_DIMS(py_freq_points)[0];
frequencies = (double*)PyArray_DATA(py_frequencies);
num_ir_gp = (int)PyArray_DIMS(py_frequencies)[0];
num_band = (int)PyArray_DIMS(py_frequencies)[1];
coef = (double*)PyArray_DATA(py_coef);
num_coef = (int)PyArray_DIMS(py_coef)[1];
grid_address = (int(*)[3])PyArray_DATA(py_grid_address);
num_gp = (int)PyArray_DIMS(py_grid_address)[0];
grid_mapping_table = (int*)PyArray_DATA(py_grid_mapping_table);
relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
/* Build: full grid point -> irreducible index (gp2ir), the list of
   irreducible grid points, and their multiplicities (weights). */
gp2ir = (int*)malloc(sizeof(int) * num_gp);
ir_grid_points = (int*)malloc(sizeof(int) * num_ir_gp);
weights = (int*)malloc(sizeof(int) * num_ir_gp);
count = 0;
for (i = 0; i < num_gp; i++) {
if (grid_mapping_table[i] == i) {
gp2ir[i] = count;
ir_grid_points[count] = i;
weights[count] = 1;
count++;
} else {
gp2ir[i] = gp2ir[grid_mapping_table[i]];
weights[gp2ir[i]]++;
}
}
if (num_ir_gp != count) {
printf("Something is wrong!\n");
}
/* Each iteration writes a disjoint slice of dos, so the loop is
   embarrassingly parallel. */
#pragma omp parallel for private(j, k, l, m, q, r, iw, ir_gps, g_addr, tetrahedra, address_double)
for (i = 0; i < num_ir_gp; i++) {
/* set 24 tetrahedra */
for (l = 0; l < 24; l++) {
for (q = 0; q < 4; q++) {
for (r = 0; r < 3; r++) {
g_addr[r] = grid_address[ir_grid_points[i]][r] +
relative_grid_address[l][q][r];
}
kgd_get_grid_address_double_mesh(address_double,
g_addr,
mesh,
is_shift);
ir_gps[l][q] = gp2ir[kgd_get_grid_point_double_mesh(address_double, mesh)];
}
}
for (k = 0; k < num_band; k++) {
/* Vertex frequencies of all 24 tetrahedra for this band. */
for (l = 0; l < 24; l++) {
for (q = 0; q < 4; q++) {
tetrahedra[l][q] = frequencies[ir_gps[l][q] * num_band + k];
}
}
for (j = 0; j < num_freq_points; j++) {
iw = thm_get_integration_weight(freq_points[j], tetrahedra, 'I') * weights[i];
for (m = 0; m < num_coef; m++) {
dos[i * num_band * num_freq_points * num_coef +
k * num_coef * num_freq_points + j * num_coef + m] +=
iw * coef[i * num_coef * num_band + m * num_band + k];
}
}
}
}
free(gp2ir);
gp2ir = NULL;
free(ir_grid_points);
ir_grid_points = NULL;
free(weights);
weights = NULL;
Py_RETURN_NONE;
}
/* Harmonic-oscillator Helmholtz free energy (without zero-point term):
   KB * T * ln(1 - exp(-f / (KB * T))).
   temperature is T in K; 'f' must be given in eV; KB in eV/K.
   log1p(-exp(-x)) is used instead of log(1 - exp(-x)) to avoid the loss
   of precision when exp(-x) is tiny (low T or high f); the two forms are
   mathematically identical. Caller guards T > 0 and f > cutoff. */
static double get_free_energy(const double temperature, const double f)
{
return KB * temperature * log1p(-exp(- f / (KB * temperature)));
}
/* Harmonic-oscillator entropy:
   S = f/(2T) * coth(f/(2 KB T)) - KB * ln(2 sinh(f/(2 KB T))).
   temperature is T in K; 'f' must be given in eV; KB in eV/K. */
static double get_entropy(const double temperature, const double f)
{
const double x = f / (2 * KB * temperature);
const double coth_term = 1 / (2 * temperature) * f * cosh(x) / sinh(x);
const double log_term = KB * log(2 * sinh(x));
return coth_term - log_term;
}
/* Harmonic-oscillator (Einstein) heat capacity:
   Cv = KB * x^2 * e^x / (e^x - 1)^2 with x = f / (KB * T).
   temperature is T in K; 'f' must be given in eV; KB in eV/K.
   NOTE(review): an earlier comment mentioned a series expansion near
   x ~ 1, but no expansion is implemented here. */
static double get_heat_capacity(const double temperature, const double f)
{
const double x = f / (KB * temperature);
const double ex = exp(x);
const double ratio = (x) / (ex - 1);
return KB * ex * ratio * ratio;
}
/* static double get_energy(double temperature, double f){ */
/* /\* temperature is defined by T (K) *\/ */
/* /\* 'f' must be given in eV. *\/ */
/* return f / (exp(f / (KB * temperature)) - 1); */
/* } */
/* Find the permutation mapping rotated positions back onto the original
   positions: on success rot_atom[j] = i means rot_pos[j] matches pos[i]
   within symprec (Cartesian distance, with fractional coordinates wrapped
   to the nearest image). Returns 1 on success, 0 if any position stays
   unmatched.
   Iterates primarily over pos (find where 0 belongs in rot_atom, then
   where 1 belongs, ...) while tracking the first unassigned slot — fast
   when the permutation is close to the identity. */
static int compute_permutation(int * rot_atom,
PHPYCONST double lat[3][3],
PHPYCONST double (*pos)[3],
PHPYCONST double (*rot_pos)[3],
const int num_pos,
const double symprec)
{
int i, j, a, b;
int first_free;
double d2, cart;
double frac[3];

/* mark every slot as unassigned */
for (i = 0; i < num_pos; i++) {
rot_atom[i] = -1;
}

first_free = 0;
for (i = 0; i < num_pos; i++) {
/* skip past slots that are already assigned */
while (rot_atom[first_free] >= 0) {
first_free++;
}
for (j = first_free; j < num_pos; j++) {
if (rot_atom[j] >= 0) {
continue;
}
/* fractional difference, wrapped to the nearest lattice image */
for (a = 0; a < 3; a++) {
frac[a] = pos[i][a] - rot_pos[j][a];
frac[a] -= nint(frac[a]);
}
/* squared Cartesian distance */
d2 = 0;
for (a = 0; a < 3; a++) {
cart = 0;
for (b = 0; b < 3; b++) {
cart += lat[a][b] * frac[b];
}
d2 += cart * cart;
}
if (sqrt(d2) < symprec) {
rot_atom[j] = i;
break;
}
}
}

/* verify the permutation is complete */
for (i = 0; i < num_pos; i++) {
if (rot_atom[i] < 0) {
printf("Encounter some problem in compute_permutation.\n");
return 0;
}
}
return 1;
}
/* Implementation detail of get_smallest_vectors. */
/* Finds the smallest vectors within each list and copies them to the output. */
/* Implementation detail of get_smallest_vectors.
   For each list of 27 candidate vectors, copy every vector whose length is
   within symprec of the list minimum into shortest_vectors[i], and record
   how many were copied in multiplicity[i]. */
static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3],
int * multiplicity,
PHPYCONST double (*vector_lists)[27][3],
PHPYCONST double (*length_lists)[27],
const int num_lists,
const double symprec)
{
int i, j, axis;
int n_kept;
double min_len;
double (*vectors)[3];
double * lengths;

for (i = 0; i < num_lists; i++) {
lengths = length_lists[i];
vectors = vector_lists[i];

/* minimum length among the 27 candidates */
min_len = DBL_MAX;
for (j = 0; j < 27; j++) {
if (lengths[j] < min_len) {
min_len = lengths[j];
}
}

/* keep every candidate within tolerance of the minimum */
n_kept = 0;
for (j = 0; j < 27; j++) {
if (lengths[j] - min_len <= symprec) {
for (axis = 0; axis < 3; axis++) {
shortest_vectors[i][n_kept][axis] = vectors[j][axis];
}
n_kept++;
}
}
multiplicity[i] = n_kept;
}
}
/* For every (to, from) atom pair, build the 27 candidate vectors
   pos_to - pos_from + lattice_point, measure their lengths in the reduced
   basis, and store all candidates whose length is within symprec of the
   minimum, transformed into supercell coordinates via trans_mat.
   smallest_vectors and multiplicity are indexed by i * num_pos_from + j.
   NOTE(review): the tolerance test here uses '<' while
   gsv_copy_smallest_vectors uses '<='; confirm whether this asymmetry
   is intentional. */
static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3],
int *multiplicity,
PHPYCONST double (*pos_to)[3],
const int num_pos_to,
PHPYCONST double (*pos_from)[3],
const int num_pos_from,
PHPYCONST int lattice_points[27][3],
PHPYCONST double reduced_basis[3][3],
PHPYCONST int trans_mat[3][3],
const double symprec)
{
int i, j, k, l, count;
double length_tmp, minimum, vec_xyz;
double length[27], vec[27][3];
for (i = 0; i < num_pos_to; i++) {
for (j = 0; j < num_pos_from; j++) {
/* 27 candidate vectors and their lengths in the reduced basis. */
for (k = 0; k < 27; k++) {
length[k] = 0;
for (l = 0; l < 3; l++) {
vec[k][l] = pos_to[i][l] - pos_from[j][l] + lattice_points[k][l];
}
for (l = 0; l < 3; l++) {
length_tmp = (reduced_basis[l][0] * vec[k][0] +
reduced_basis[l][1] * vec[k][1] +
reduced_basis[l][2] * vec[k][2]);
length[k] += length_tmp * length_tmp;
}
length[k] = sqrt(length[k]);
}
/* shortest candidate */
minimum = DBL_MAX;
for (k = 0; k < 27; k++) {
if (length[k] < minimum) {
minimum = length[k];
}
}
/* keep all candidates within tolerance of the minimum */
count = 0;
for (k = 0; k < 27; k++) {
if (length[k] - minimum < symprec) {
for (l = 0; l < 3; l++) {
/* Transform to supercell coordinates */
vec_xyz = (trans_mat[l][0] * vec[k][0] +
trans_mat[l][1] * vec[k][1] +
trans_mat[l][2] * vec[k][2]);
smallest_vectors[i * num_pos_from + j][count][l] = vec_xyz;
}
count++;
}
}
multiplicity[i * num_pos_from + j] = count;
}
}
}
/* Distribute symmetry-reduced fc2 blocks to the remaining atoms.
   For each atom in atom_list that maps onto an already-computed ("done")
   atom via map_atoms/map_syms, rotate the done atom's force-constant
   blocks into place: P' = R^-1 P R, with the column index permuted by the
   rotation's atom permutation.
   fc2: shape [n_pos][n_pos] of 3x3 blocks (in/out).
   Fixes vs. previous revision: removed the dead `atom_list_reverse = NULL`
   store and added a NULL check after malloc (silently returns, leaving
   fc2 unchanged, on allocation failure). */
static void distribute_fc2(double (*fc2)[3][3], /* shape[n_pos][n_pos] */
const int * atom_list,
const int len_atom_list,
PHPYCONST double (*r_carts)[3][3], /* shape[n_rot] */
const int * permutations, /* shape[n_rot][n_pos] */
const int * map_atoms, /* shape [n_pos] */
const int * map_syms, /* shape [n_pos] */
const int num_rot,
const int num_pos)
{
int i, j, k, l, m;
int atom_todo, atom_done, atom_other;
int sym_index;
int *atom_list_reverse;
double (*fc2_done)[3];
double (*fc2_todo)[3];
double (*r_cart)[3];
const int * permutation;

atom_list_reverse = (int*)malloc(sizeof(int) * num_pos);
if (atom_list_reverse == NULL) {
/* Out of memory: leave fc2 untouched. */
return;
}
/* atom_list_reverse[!atom_done] is undefined. */
for (i = 0; i < len_atom_list; i++) {
atom_done = map_atoms[atom_list[i]];
if (atom_done == atom_list[i]) {
atom_list_reverse[atom_done] = i;
}
}

for (i = 0; i < len_atom_list; i++) {
/* look up how this atom maps into the done list. */
atom_todo = atom_list[i];
atom_done = map_atoms[atom_todo];
sym_index = map_syms[atom_todo];

/* skip the atoms in the done list, */
/* which are easily identified because they map to themselves. */
if (atom_todo == atom_done) {
continue;
}

/* look up information about the rotation */
r_cart = r_carts[sym_index];
permutation = &permutations[sym_index * num_pos]; /* shape[num_pos] */

/* distribute terms from atom_done to atom_todo */
for (atom_other = 0; atom_other < num_pos; atom_other++) {
fc2_done = fc2[atom_list_reverse[atom_done] * num_pos + permutation[atom_other]];
fc2_todo = fc2[i * num_pos + atom_other];
for (j = 0; j < 3; j++) {
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
/* P' = R^-1 P R */
fc2_todo[j][k] += r_cart[l][j] * r_cart[m][k] * fc2_done[l][m];
}
}
}
}
}
}
free(atom_list_reverse);
atom_list_reverse = NULL;
}
/* Impose index-permutation symmetry on a full force-constant matrix:
   fc[i][j][a][b] and fc[j][i][b][a] are replaced by their average.
   fc has layout [natom][natom][3][3] flattened into a 1-D array. */
static void set_index_permutation_symmetry_fc(double * fc,
const int natom)
{
int i, j, a, b, pq, qp;

for (i = 0; i < natom; i++) {
for (j = i; j < natom; j++) {
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
/* diagonal atom block: only the strict upper triangle,
   so each Cartesian pair is touched exactly once */
if (i == j && b <= a) {
continue;
}
pq = (i * natom + j) * 9 + a * 3 + b;
qp = (j * natom + i) * 9 + b * 3 + a;
fc[pq] = (fc[pq] + fc[qp]) / 2;
fc[qp] = fc[pq];
}
}
}
}
}
/* Impose the translational-invariance (acoustic) sum rule on a full
   force-constant matrix: each self block fc[i][i] is set to
   -(S + S^T)/2, where S is the sum of all off-diagonal blocks of row i.
   fc has layout [natom][natom][3][3] flattened into a 1-D array. */
static void set_translational_symmetry_fc(double * fc,
const int natom)
{
int i, j, a, b;
double acc[3][3];

for (i = 0; i < natom; i++) {
/* accumulate off-diagonal blocks of row i */
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
acc[a][b] = 0;
}
}
for (j = 0; j < natom; j++) {
if (j == i) {
continue;
}
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
acc[a][b] += fc[i * natom * 9 + j * 9 + a * 3 + b];
}
}
}
/* symmetrized negative sum goes into the self block */
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
fc[i * natom * 9 + i * 9 + a * 3 + b] = -(acc[a][b] + acc[b][a]) / 2;
}
}
}
}
/* Impose index-permutation symmetry on a compact force-constant matrix
   fc[n_patom][n_satom][3][3] (flattened). The transposed counterpart of
   element (i_p, j) lives at (j_p, i_trans), where j_p = s2pp[j] and
   i_trans = perms[nsym_list[j] * n_satom + i] (the translation that sends
   j into the primitive cell applied to i). The `done` table guarantees
   each symmetric pair is combined exactly once.
   If is_transpose is nonzero the two elements are swapped (pure
   transpose); otherwise they are replaced by their average.
   Fixes vs. previous revision: removed the dead `done = NULL` store and
   added a NULL check after malloc (returns with fc unchanged on OOM). */
static void set_index_permutation_symmetry_compact_fc(double * fc,
const int p2s[],
const int s2pp[],
const int nsym_list[],
const int perms[],
const int n_satom,
const int n_patom,
const int is_transpose)
{
int i, j, k, l, m, n, i_p, j_p, i_trans;
double fc_elem;
char *done;

done = (char*)malloc(sizeof(char) * n_satom * n_patom);
if (done == NULL) {
/* Out of memory: leave fc untouched. */
return;
}
for (i = 0; i < n_satom * n_patom; i++) {
done[i] = 0;
}
for (j = 0; j < n_satom; j++) {
j_p = s2pp[j];
for (i_p = 0; i_p < n_patom; i_p++) {
i = p2s[i_p];
if (i == j) { /* diagonal part */
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
if (l > k) {
m = i_p * n_satom * 9 + i * 9 + k * 3 + l;
n = i_p * n_satom * 9 + i * 9 + l * 3 + k;
if (is_transpose) {
fc_elem = fc[m];
fc[m] = fc[n];
fc[n] = fc_elem;
} else {
fc[m] = (fc[m] + fc[n]) / 2;
fc[n] = fc[m];
}
}
}
}
}
if (!done[i_p * n_satom + j]) {
/* (j, i) -- nsym_list[j] --> (j', i') */
/* nsym_list[j] translates j to j' where j' is in */
/* primitive cell. The same translation sends i to i' */
/* where i' is not necessarily to be in primitive cell. */
/* Thus, i' = perms[nsym_list[j] * n_satom + i] */
i_trans = perms[nsym_list[j] * n_satom + i];
done[i_p * n_satom + j] = 1;
done[j_p * n_satom + i_trans] = 1;
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
m = i_p * n_satom * 9 + j * 9 + k * 3 + l;
n = j_p * n_satom * 9 + i_trans * 9 + l * 3 + k;
if (is_transpose) {
fc_elem = fc[m];
fc[m] = fc[n];
fc[n] = fc_elem;
} else {
fc[m] = (fc[n] + fc[m]) / 2;
fc[n] = fc[m];
}
}
}
}
}
}
free(done);
done = NULL;
}
/* Impose the translational-invariance (acoustic) sum rule on a compact
   force-constant matrix fc[n_patom][n_satom][3][3] (flattened): the self
   block fc[i_p][p2s[i_p]] is set to -(S + S^T)/2, where S sums all other
   blocks of row i_p. */
static void set_translational_symmetry_compact_fc(double * fc,
const int p2s[],
const int n_satom,
const int n_patom)
{
int i_p, j, a, b;
double acc[3][3];

for (i_p = 0; i_p < n_patom; i_p++) {
/* accumulate all blocks of row i_p except the self block */
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
acc[a][b] = 0;
}
}
for (j = 0; j < n_satom; j++) {
if (j == p2s[i_p]) {
continue;
}
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
acc[a][b] += fc[i_p * n_satom * 9 + j * 9 + a * 3 + b];
}
}
}
/* symmetrized negative sum goes into the self block */
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
fc[i_p * n_satom * 9 + p2s[i_p] * 9 + a * 3 + b] =
-(acc[a][b] + acc[b][a]) / 2;
}
}
}
}
/* Round to the nearest integer, halves rounded away from zero
   (e.g. 2.5 -> 3, -2.5 -> -3). */
static int nint(const double a)
{
return (int)(a < 0.0 ? a - 0.5 : a + 0.5);
}
|
GB_unop__identity_uint16_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint16_uint32
// op(A') function: GB_unop_tran__identity_uint16_uint32
// C type: uint16_t
// A type: uint32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
/* Operator configuration macros consumed by the shared apply/transpose
   templates below. NOTE(review): this file is auto-generated; keep these
   definitions in sync with the generator, do not hand-edit. */
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): apply the identity operator with a uint32 -> uint16
   cast over anz entries, parallelized over nthreads.
   NOTE(review): auto-generated — edit the generator, not this file.
   Returns GrB_NO_VALUE when the operator is compile-time disabled. */
GrB_Info GB_unop_apply__identity_uint16_uint32
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose, typecast uint32 -> uint16, and apply the
   identity operator; the actual work is in the shared template
   GB_unop_transpose.c, driven by the GB_* macros defined above.
   NOTE(review): auto-generated — edit the generator, not this file. */
GrB_Info GB_unop_tran__identity_uint16_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
batched_banded_inl.h | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <nvbio/basic/types.h>
#include <nvbio/basic/thrust_view.h>
#include <nvbio/alignment/utils.h>
#include <nvbio/basic/cuda/work_queue.h>
#include <nvbio/basic/strided_iterator.h>
#include <nvbio/alignment/batched_stream.h>
namespace nvbio {
namespace aln {
///@addtogroup private
///@{
/// Score one banded-alignment job from a stream: initializes the job's
/// context, loads the pattern/quals/text strings, and runs
/// banded_alignment_score over the full DP window. Always calls
/// stream.output(), even when init_context() declines the job.
/// \tparam BAND_LEN    static band width of the DP matrix
/// \tparam stream_type provider of jobs, strings and the aligner
/// \param work_id      index of the job within the stream
template <uint32 BAND_LEN, typename stream_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void batched_banded_alignment_score(const stream_type& stream, const uint32 work_id)
{
typedef typename stream_type::aligner_type aligner_type;
typedef typename stream_type::context_type context_type;
typedef typename stream_type::strings_type strings_type;
// load the alignment context
context_type context;
if (stream.init_context( work_id, &context ) == true)
{
// compute the end of the current DP matrix window
// (text- vs pattern-blocking aligners advance along different axes)
const uint32 len = equal<typename aligner_type::algorithm_tag,TextBlockingTag>() ?
stream.text_length( work_id, &context ) :
stream.pattern_length( work_id, &context );
// load the strings to be aligned
strings_type strings;
stream.load_strings( work_id, 0, len, &context, &strings );
// score the current DP matrix window
banded_alignment_score<BAND_LEN>(
stream.aligner(),
strings.pattern,
strings.quals,
strings.text,
context.min_score,
context.sink );
}
// handle the output
stream.output( work_id, &context );
}
/// CUDA kernel: one thread per alignment job; excess threads in the last
/// block exit early.
template <uint32 BLOCKDIM, uint32 MINBLOCKS, uint32 BAND_LEN, typename stream_type>
__global__ void
__launch_bounds__(BLOCKDIM,MINBLOCKS)
batched_banded_alignment_score_kernel(const stream_type stream)
{
const uint32 tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= stream.size())
return;
batched_banded_alignment_score<BAND_LEN>( stream, tid );
}
///@} // end of private group
///
/// HostThreadScheduler specialization of BatchedBandedAlignmentScore.
///
/// \tparam stream_type the stream of alignment jobs
///
// Host (OpenMP) specialization: jobs are scored directly on CPU threads,
// so no temporary device storage is required (both bounds return 0).
template <uint32 BAND_LEN, typename stream_type>
struct BatchedBandedAlignmentScore<BAND_LEN,stream_type,HostThreadScheduler>
{
static const uint32 BLOCKDIM = 128;
typedef typename stream_type::aligner_type aligner_type;
typedef typename column_storage_type<aligner_type>::type cell_type;
/// return the minimum number of bytes required by the algorithm
///
static uint64 min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size) { return 0u; }
/// return the maximum number of bytes required by the algorithm
///
static uint64 max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size) { return 0u; }
/// enact the batch execution
///
void enact(stream_type stream, uint64 temp_size = 0u, uint8* temp = NULL);
};
// enact the batch execution
//
// enact the batch execution on the host: one OpenMP task per alignment job
// (temp_size/temp are unused — host scheduling needs no scratch storage)
template <uint32 BAND_LEN, typename stream_type>
void BatchedBandedAlignmentScore<BAND_LEN,stream_type,HostThreadScheduler>::enact(stream_type stream, uint64 temp_size, uint8* temp)
{
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int tid = 0; tid < int( stream.size() ); ++tid)
batched_banded_alignment_score<BAND_LEN>( stream, tid );
}
///
/// DeviceThreadScheduler specialization of BatchedBandedAlignmentScore.
///
/// \tparam stream_type the stream of alignment jobs
///
// Device specialization with a plain one-thread-per-job kernel launch;
// no temporary device storage is required (both bounds return 0).
template <uint32 BLOCKDIM, uint32 MINBLOCKS, uint32 BAND_LEN, typename stream_type>
struct BatchedBandedAlignmentScore<BAND_LEN,stream_type,DeviceThreadBlockScheduler<BLOCKDIM,MINBLOCKS> >
{
typedef typename stream_type::aligner_type aligner_type;
typedef typename column_storage_type<aligner_type>::type cell_type;
/// return the minimum number of bytes required by the algorithm
///
static uint64 min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size) { return 0u; }
/// return the maximum number of bytes required by the algorithm
///
static uint64 max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size) { return 0u; }
/// enact the batch execution
///
void enact(stream_type stream, uint64 temp_size = 0u, uint8* temp = NULL);
};
// enact the batch execution
//
// enact the batch execution: launch one device thread per alignment job
// (temp_size/temp are unused — this scheduler needs no scratch storage).
// An empty stream is skipped explicitly: a <<<0, BLOCKDIM>>> launch is an
// invalid CUDA execution configuration.
template <uint32 BLOCKDIM, uint32 MINBLOCKS, uint32 BAND_LEN, typename stream_type>
void BatchedBandedAlignmentScore<BAND_LEN,stream_type,DeviceThreadBlockScheduler<BLOCKDIM,MINBLOCKS> >::enact(stream_type stream, uint64 temp_size, uint8* temp)
{
if (stream.size() == 0u)
return;
const uint32 n_blocks = (stream.size() + BLOCKDIM-1) / BLOCKDIM;
batched_banded_alignment_score_kernel<BLOCKDIM,MINBLOCKS,BAND_LEN> <<<n_blocks, BLOCKDIM>>>( stream );
}
///
/// DeviceStagedThreadScheduler specialization of BatchedBandedAlignmentScore.
///
/// \tparam stream_type the stream of alignment jobs
///
// Persistent-threads specialization: jobs are pushed through a device
// WorkQueue, with per-job band storage strided across a temp buffer whose
// capacity bounds how many jobs are in flight at once.
template <uint32 BAND_LEN, typename stream_type>
struct BatchedBandedAlignmentScore<BAND_LEN,stream_type,DeviceStagedThreadScheduler>
{
static const uint32 BLOCKDIM = 128;
typedef typename stream_type::aligner_type aligner_type;
typedef typename checkpoint_storage_type<aligner_type>::type cell_type;
/// return the per-element column storage size
///
static uint32 column_storage(const uint32 max_pattern_len, const uint32 max_text_len)
{
// one band row of cells, rounded up to 4-byte alignment
const uint32 column_size = uint32( BAND_LEN * sizeof(cell_type) );
return align<4>( column_size );
}
/// return the minimum number of bytes required by the algorithm
///
static uint64 min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size)
{
// enough band storage for 1024 in-flight jobs
return column_storage( max_pattern_len, max_text_len ) * 1024;
}
/// return the maximum number of bytes required by the algorithm
///
static uint64 max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size)
{
// band storage for every job in the stream at once
return column_storage( max_pattern_len, max_text_len ) * stream_size;
}
/// enact the batch execution
///
void enact(stream_type stream, uint64 temp_size = 0u, uint8* temp = NULL)
{
const uint64 min_temp_size = min_temp_storage(
stream.max_pattern_length(),
stream.max_text_length(),
stream.size() );
// allocate scratch storage if the caller did not provide any;
// temp_dvec must outlive the consume() call below
thrust::device_vector<uint8> temp_dvec;
if (temp == NULL)
{
temp_size = nvbio::max( min_temp_size, temp_size );
temp_dvec.resize( temp_size );
temp = nvbio::device_view( temp_dvec );
}
// set the queue capacity based on available memory
const uint32 max_pattern_len = stream.max_pattern_length();
const uint32 max_text_len = stream.max_text_length();
const uint32 queue_capacity = uint32( temp_size / column_storage( max_pattern_len, max_text_len ) );
m_work_queue.set_capacity( queue_capacity );
// prepare the work stream
ScoreStream<stream_type> score_stream(
stream, // the alignments stream
temp, // band storage
NULL, // no need for checkpoints
queue_capacity ); // the queue capacity, used for the memory striding
// consume the work stream
m_work_queue.consume( score_stream );
}
private:
cuda::WorkQueue<
cuda::PersistentThreadsQueueTag,
BandedScoreUnit<BAND_LEN, stream_type>,
BLOCKDIM> m_work_queue;
};
// --- Banded Traceback --------------------------------------------------------------------------------------------------------- //
///@addtogroup private
///@{
/// perform the banded alignment traceback for a single work item
///
/// \param stream       the stream of alignment jobs
/// \param checkpoints  base pointer of the (strided) checkpoint storage
/// \param submatrices  base pointer of the (strided) direction-vector storage
/// \param stride       storage stride between consecutive threads' slots
/// \param work_id      index of the work item within the stream
/// \param thread_id    index used to select this thread's storage slot
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type, typename cell_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void batched_banded_alignment_traceback(stream_type& stream, cell_type* checkpoints, uint32* submatrices, const uint32 stride, const uint32 work_id, const uint32 thread_id)
{
typedef typename stream_type::aligner_type aligner_type;
typedef typename stream_type::context_type context_type;
typedef typename stream_type::strings_type strings_type;
// load the alignment context; if it cannot be initialized, emit the
// output immediately and skip the traceback
context_type context;
if (stream.init_context( work_id, &context ) == false)
{
// handle the output
stream.output( work_id, &context );
return;
}
// compute the end of the current DP matrix window: pattern-blocking
// aligners walk the pattern, the others walk the text
const uint32 len = equal<typename aligner_type::algorithm_tag,PatternBlockingTag>() ?
stream.pattern_length( work_id, &context ) :
stream.text_length( work_id, &context );
// load the strings to be aligned
strings_type strings;
stream.load_strings( work_id, 0, len, &context, &strings );
// fetch the proper checkpoint storage (thread-strided view)
typedef strided_iterator<cell_type*> checkpoint_type;
checkpoint_type checkpoint = checkpoint_type( checkpoints + thread_id, stride );
// fetch the proper submatrix storage, wrapped in a packed stream of
// BITS-wide direction-vector entries
typedef strided_iterator<uint32*> submatrix_storage_type;
submatrix_storage_type submatrix_storage = submatrix_storage_type( submatrices + thread_id, stride );
const uint32 BITS = direction_vector_traits<aligner_type>::BITS;
PackedStream<submatrix_storage_type,uint8,BITS,false> submatrix( submatrix_storage );
// score the current DP matrix window
context.alignment = banded_alignment_traceback<BAND_LEN, CHECKPOINTS>(
stream.aligner(),
strings.pattern,
strings.quals,
strings.text,
context.min_score,
context.backtracer,
checkpoint,
submatrix );
// handle the output
stream.output( work_id, &context );
}
/// one-thread-per-work-item traceback kernel: thread i processes work item i
/// (used when the temporary storage can hold the entire stream at once)
template <uint32 BLOCKDIM, uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type, typename cell_type>
__global__ void batched_banded_alignment_traceback_kernel(stream_type stream, cell_type* checkpoints, uint32* submatrices, const uint32 stride)
{
const uint32 tid = blockIdx.x * BLOCKDIM + threadIdx.x;
if (tid >= stream.size())
return;
// work_id == thread_id == tid: each thread owns its own storage slot
batched_banded_alignment_traceback<BAND_LEN, CHECKPOINTS>( stream, checkpoints, submatrices, stride, tid, tid );
}
/// persistent-threads traceback kernel: a fixed-size grid sweeps the stream
/// with a grid-wide stride, so each thread reuses the same storage slot
/// (indexed by thread_id) for many work items
template <uint32 BLOCKDIM, uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type, typename cell_type>
__global__ void persistent_banded_batched_alignment_traceback_kernel(stream_type stream, cell_type* checkpoints, uint32* submatrices, const uint32 stride)
{
const uint32 grid_threads = gridDim.x * BLOCKDIM;
const uint32 thread_id = threadIdx.x + blockIdx.x*BLOCKDIM;
const uint32 stream_end = stream.size();
// sweep the stream at a grid-threads stride, starting from this thread's id
for (uint32 stream_begin = 0; stream_begin < stream_end; stream_begin += grid_threads)
{
const uint32 work_id = thread_id + stream_begin;
if (work_id < stream_end)
batched_banded_alignment_traceback<BAND_LEN, CHECKPOINTS>( stream, checkpoints, submatrices, stride, work_id, thread_id );
}
}
///@} // end of private group
///
/// DeviceThreadScheduler specialization of BatchedAlignmentTraceback.
///
/// \tparam stream_type the stream of alignment jobs
///
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type>
struct BatchedBandedAlignmentTraceback<BAND_LEN,CHECKPOINTS, stream_type,DeviceThreadScheduler>
{
static const uint32 BLOCKDIM = 128;
typedef typename stream_type::aligner_type aligner_type;
typedef typename stream_type::cell_type cell_type;
/// return the per-element checkpoint storage size
///
/// One BAND_LEN-wide row saved every CHECKPOINTS rows of the pattern,
/// rounded up to a 4-byte boundary.
static uint32 checkpoint_storage(const uint32 max_pattern_len, const uint32 max_text_len)
{
return align<4>( uint32( BAND_LEN * ((max_pattern_len + CHECKPOINTS-1) / CHECKPOINTS) * sizeof(cell_type) ) );
}
/// return the per-element submatrix storage size
///
/// Direction-vector bits for one BAND_LEN x CHECKPOINTS submatrix, packed
/// ELEMENTS_PER_WORD entries per 32-bit word.
static uint32 submatrix_storage(const uint32 max_pattern_len, const uint32 max_text_len)
{
typedef typename stream_type::aligner_type aligner_type;
const uint32 BITS = direction_vector_traits<aligner_type>::BITS;
const uint32 ELEMENTS_PER_WORD = 32 / BITS;
return ((BAND_LEN * CHECKPOINTS + ELEMENTS_PER_WORD-1) / ELEMENTS_PER_WORD) * sizeof(uint32);
}
/// return the per-element storage size (checkpoints + submatrix)
///
static uint32 element_storage(const uint32 max_pattern_len, const uint32 max_text_len)
{
return checkpoint_storage( max_pattern_len, max_text_len ) +
submatrix_storage( max_pattern_len, max_text_len );
}
/// return the minimum number of bytes required by the algorithm
///
static uint64 min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size);
/// return the maximum number of bytes required by the algorithm
///
static uint64 max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size);
/// enact the batch execution
///
void enact(stream_type stream, uint64 temp_size = 0u, uint8* temp = NULL);
};
// return the minimum number of bytes required by the algorithm
//
// NOTE(review): as in the scoring variant, 1024 looks like a fixed lower
// bound on the queue capacity (in elements) — confirm.
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type>
uint64 BatchedBandedAlignmentTraceback<BAND_LEN,CHECKPOINTS, stream_type,DeviceThreadScheduler>::min_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size)
{
return element_storage( max_pattern_len, max_text_len ) * 1024;
}
// return the maximum number of bytes required by the algorithm
//
// Upper bound: one element_storage slot per stream element.
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type>
uint64 BatchedBandedAlignmentTraceback<BAND_LEN,CHECKPOINTS,stream_type,DeviceThreadScheduler>::max_temp_storage(const uint32 max_pattern_len, const uint32 max_text_len, const uint32 stream_size)
{
return element_storage( max_pattern_len, max_text_len ) * stream_size;
}
// enact the batch execution
//
// Chooses between a flat one-thread-per-item launch (when the temporary
// storage can hold the whole stream) and a persistent-threads kernel that
// recycles queue_capacity storage slots.
//
// NOTE(review): this guard tests temp_size == 0 rather than temp == NULL
// (the scoring enact above tests temp == NULL); a caller passing
// temp == NULL with a non-zero temp_size would reach the kernels with NULL
// storage — confirm whether that combination is ever legal.
template <uint32 BAND_LEN, uint32 CHECKPOINTS, typename stream_type>
void BatchedBandedAlignmentTraceback<BAND_LEN,CHECKPOINTS,stream_type,DeviceThreadScheduler>::enact(stream_type stream, uint64 temp_size, uint8* temp)
{
const uint64 min_temp_size = min_temp_storage(
stream.max_pattern_length(),
stream.max_text_length(),
stream.size() );
thrust::device_vector<uint8> temp_dvec;
if (temp_size == 0u)
{
temp_dvec.resize( min_temp_size );
temp = nvbio::device_view( temp_dvec );
temp_size = min_temp_size;
}
// set the queue capacity based on available memory
const uint32 max_pattern_len = stream.max_pattern_length();
const uint32 max_text_len = stream.max_text_length();
const uint32 queue_capacity = uint32( temp_size / element_storage( max_pattern_len, max_text_len ) );
const uint64 checkpoints_size = checkpoint_storage( max_pattern_len, max_text_len );
if (queue_capacity >= stream.size())
{
// enough storage for the whole stream: flat launch, stride == stream size
const uint32 n_blocks = (stream.size() + BLOCKDIM-1) / BLOCKDIM;
// submatrices live right after the checkpoint region
cell_type* checkpoints = (cell_type*)(temp);
uint32* submatrices = (uint32*) (temp + checkpoints_size * stream.size());
batched_banded_alignment_traceback_kernel<BLOCKDIM,BAND_LEN,CHECKPOINTS> <<<n_blocks, BLOCKDIM>>>(
stream,
checkpoints,
submatrices,
stream.size() );
}
else
{
// compute the number of blocks we are going to launch: bounded by both
// hardware occupancy and the number of storage slots, but at least 1
const uint32 n_blocks = nvbio::max( nvbio::min(
(uint32)cuda::max_active_blocks( persistent_banded_batched_alignment_traceback_kernel<BLOCKDIM,BAND_LEN,CHECKPOINTS,stream_type,cell_type>, BLOCKDIM, 0u ),
queue_capacity / BLOCKDIM ), 1u );
cell_type* checkpoints = (cell_type*)(temp);
uint32* submatrices = (uint32*) (temp + checkpoints_size * queue_capacity);
persistent_banded_batched_alignment_traceback_kernel<BLOCKDIM,BAND_LEN,CHECKPOINTS> <<<n_blocks, BLOCKDIM>>>(
stream,
checkpoints,
submatrices,
queue_capacity );
}
}
///@} // end of BatchAlignment group
///@} // end of the Alignment group
} // namespace alignment
} // namespace nvbio
|
maxpool_layer.c | #include "maxpool_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "utils.h"
#include "gemm.h"
#include <stdio.h>
image get_maxpool_image(maxpool_layer l)
{
// Expose the layer's output buffer as an image view of size out_w x out_h x c.
return float_to_image(l.out_w, l.out_h, l.c, l.output);
}
image get_maxpool_delta(maxpool_layer l)
{
// Expose the layer's gradient buffer as an image view of size out_w x out_h x c.
return float_to_image(l.out_w, l.out_h, l.c, l.delta);
}
// Create the cuDNN descriptors used by the pooling layer (pooling descriptor
// plus source/destination tensor descriptors). No-op when built without CUDNN.
void create_maxpool_cudnn_tensors(layer *l)
{
#ifdef CUDNN
CHECK_CUDNN(cudnnCreatePoolingDescriptor(&l->poolingDesc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&l->srcTensorDesc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&l->dstTensorDesc));
#endif // CUDNN
}
// Configure the cuDNN descriptors for MAX pooling from the layer's current
// geometry (window size, half-padding, strides, NCHW float tensors).
// Must be called again whenever the layer is resized.
void cudnn_maxpool_setup(layer *l)
{
#ifdef CUDNN
CHECK_CUDNN(cudnnSetPooling2dDescriptor(
l->poolingDesc,
CUDNN_POOLING_MAX,
CUDNN_NOT_PROPAGATE_NAN, // CUDNN_PROPAGATE_NAN, CUDNN_NOT_PROPAGATE_NAN
l->size,
l->size,
l->pad/2, //0, //l.pad,
l->pad/2, //0, //l.pad,
l->stride_x,
l->stride_y));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w));
#endif // CUDNN
}
// Configure the cuDNN descriptors for local AVERAGE pooling (padding cells
// excluded from the mean). Mirrors cudnn_maxpool_setup except for the mode.
void cudnn_local_avgpool_setup(layer *l)
{
#ifdef CUDNN
CHECK_CUDNN(cudnnSetPooling2dDescriptor(
l->poolingDesc,
CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING,
CUDNN_NOT_PROPAGATE_NAN, // CUDNN_PROPAGATE_NAN, CUDNN_NOT_PROPAGATE_NAN
l->size,
l->size,
l->pad / 2, //0, //l.pad,
l->pad / 2, //0, //l.pad,
l->stride_x,
l->stride_y));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w));
#endif // CUDNN
}
// Build a max-pooling (or local average-pooling) layer.
//
// Modes, selected by the flags:
//   avgpool       - local average pooling instead of max pooling
//   maxpool_depth - pool across channels at each spatial position (out_channels groups)
//   antialiasing  - the pooling layer itself runs with stride 1 and a fixed
//                   blur convolution (stored in l.input_layer) applies the
//                   original stride afterwards
// Buffers (indexes/delta only when training) are sized out_h*out_w*out_c*batch.
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels, int antialiasing, int avgpool, int train)
{
maxpool_layer l = { (LAYER_TYPE)0 };
l.avgpool = avgpool;
if (avgpool) l.type = LOCAL_AVGPOOL;
else l.type = MAXPOOL;
l.train = train;
// remember the requested strides: with antialiasing they are applied by the
// blur convolution below, not by the pooling itself
const int blur_stride_x = stride_x;
const int blur_stride_y = stride_y;
l.antialiasing = antialiasing;
if (antialiasing) {
stride_x = stride_y = l.stride = l.stride_x = l.stride_y = 1; // use stride=1 in host-layer
}
l.batch = batch;
l.h = h;
l.w = w;
l.c = c;
l.pad = padding;
l.maxpool_depth = maxpool_depth;
l.out_channels = out_channels;
if (maxpool_depth) {
// depth pooling keeps the spatial size and reduces channels to out_channels
l.out_c = out_channels;
l.out_w = l.w;
l.out_h = l.h;
}
else {
l.out_w = (w + padding - size) / stride_x + 1;
l.out_h = (h + padding - size) / stride_y + 1;
l.out_c = c;
}
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = h*w*c;
l.size = size;
l.stride = stride_x;
l.stride_x = stride_x;
l.stride_y = stride_y;
int output_size = l.out_h * l.out_w * l.out_c * batch;
// argmax indexes are only needed for the max-pool backward pass
if (train) {
if (!avgpool) l.indexes = (int*)xcalloc(output_size, sizeof(int));
l.delta = (float*)xcalloc(output_size, sizeof(float));
}
l.output = (float*)xcalloc(output_size, sizeof(float));
if (avgpool) {
l.forward = forward_local_avgpool_layer;
l.backward = backward_local_avgpool_layer;
}
else {
l.forward = forward_maxpool_layer;
l.backward = backward_maxpool_layer;
}
#ifdef GPU
if (avgpool) {
l.forward_gpu = forward_local_avgpool_layer_gpu;
l.backward_gpu = backward_local_avgpool_layer_gpu;
}
else {
l.forward_gpu = forward_maxpool_layer_gpu;
l.backward_gpu = backward_maxpool_layer_gpu;
}
if (train) {
if (!avgpool) l.indexes_gpu = cuda_make_int_array(output_size);
l.delta_gpu = cuda_make_array(l.delta, output_size);
}
l.output_gpu = cuda_make_array(l.output, output_size);
create_maxpool_cudnn_tensors(&l);
if (avgpool) cudnn_local_avgpool_setup(&l);
else cudnn_maxpool_setup(&l);
#endif // GPU
l.bflops = (l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.;
if (avgpool) {
if (stride_x == stride_y)
fprintf(stderr, "avg %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
else
fprintf(stderr, "avg %2dx%2d/%2dx%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, stride_y, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
}
else {
if (maxpool_depth)
fprintf(stderr, "max-depth %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
else if (stride_x == stride_y)
fprintf(stderr, "max %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
else
fprintf(stderr, "max %2dx%2d/%2dx%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, stride_y, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
}
if (l.antialiasing) {
// build the fixed blur convolution that applies the original stride:
// a 2x2 box filter for antialiasing==2, otherwise a 3x3 binomial kernel
printf("AA: ");
l.input_layer = (layer*)calloc(1, sizeof(layer));
int blur_size = 3;
int blur_pad = blur_size / 2;
if (l.antialiasing == 2) {
blur_size = 2;
blur_pad = 0;
}
*(l.input_layer) = make_convolutional_layer(batch, 1, l.out_h, l.out_w, l.out_c, l.out_c, l.out_c, blur_size, blur_stride_x, blur_stride_y, 1, blur_pad, LINEAR, 0, 0, 0, 0, 0, 1, 0, NULL, 0, 0, train);
const int blur_nweights = l.out_c * blur_size * blur_size; // (n / n) * n * blur_size * blur_size;
int i;
if (blur_size == 2) {
// 2x2 box filter: all weights 1/4
for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) {
l.input_layer->weights[i + 0] = 1 / 4.f;
l.input_layer->weights[i + 1] = 1 / 4.f;
l.input_layer->weights[i + 2] = 1 / 4.f;
l.input_layer->weights[i + 3] = 1 / 4.f;
}
}
else {
// 3x3 binomial kernel [1 2 1; 2 4 2; 1 2 1] / 16
for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) {
l.input_layer->weights[i + 0] = 1 / 16.f;
l.input_layer->weights[i + 1] = 2 / 16.f;
l.input_layer->weights[i + 2] = 1 / 16.f;
l.input_layer->weights[i + 3] = 2 / 16.f;
l.input_layer->weights[i + 4] = 4 / 16.f;
l.input_layer->weights[i + 5] = 2 / 16.f;
l.input_layer->weights[i + 6] = 1 / 16.f;
l.input_layer->weights[i + 7] = 2 / 16.f;
l.input_layer->weights[i + 8] = 1 / 16.f;
}
}
for (i = 0; i < l.out_c; ++i) l.input_layer->biases[i] = 0;
#ifdef GPU
if (gpu_index >= 0) {
if (l.antialiasing) l.input_antialiasing_gpu = cuda_make_array(NULL, l.batch*l.outputs);
push_convolutional_layer(*(l.input_layer));
}
#endif // GPU
}
return l;
}
// Resize the layer for a new input width/height: recompute output geometry,
// reallocate host (and GPU) buffers, and refresh the cuDNN descriptors.
void resize_maxpool_layer(maxpool_layer *l, int w, int h)
{
l->h = h;
l->w = w;
l->inputs = h*w*l->c;
l->out_w = (w + l->pad - l->size) / l->stride_x + 1;
l->out_h = (h + l->pad - l->size) / l->stride_y + 1;
l->outputs = l->out_w * l->out_h * l->out_c;
int output_size = l->outputs * l->batch;
// indexes only exist for trainable max-pool layers
if (l->train) {
if (!l->avgpool) l->indexes = (int*)xrealloc(l->indexes, output_size * sizeof(int));
l->delta = (float*)xrealloc(l->delta, output_size * sizeof(float));
}
l->output = (float*)xrealloc(l->output, output_size * sizeof(float));
#ifdef GPU
// GPU buffers cannot be realloc'd: free and recreate at the new size
CHECK_CUDA(cudaFree(l->output_gpu));
l->output_gpu = cuda_make_array(l->output, output_size);
if (l->train) {
if (!l->avgpool) {
CHECK_CUDA(cudaFree((float *)l->indexes_gpu));
l->indexes_gpu = cuda_make_int_array(output_size);
}
CHECK_CUDA(cudaFree(l->delta_gpu));
l->delta_gpu = cuda_make_array(l->delta, output_size);
}
if(l->avgpool) cudnn_local_avgpool_setup(l);
else cudnn_maxpool_setup(l);
#endif
}
// Forward pass of the max-pooling layer.
//
// Three paths:
//   1. maxpool_depth: at each (x, y), take the max over channel groups
//      (channels k = g, g+out_c, g+2*out_c, ... for each output channel g).
//   2. inference with square stride: vectorized forward_maxpool_layer_avx.
//   3. generic spatial max pooling with argmax recording for the backward pass.
// If antialiasing is enabled, the result is additionally blurred by the
// fixed convolution in l.input_layer and copied back into l.output.
void forward_maxpool_layer(const maxpool_layer l, network_state state)
{
if (l.maxpool_depth)
{
int b, i, j, k, g;
for (b = 0; b < l.batch; ++b) {
// BUGFIX: j, g and k are declared at function scope, so without an
// explicit private() clause they are shared between OpenMP threads,
// racing on the inner loop counters and corrupting the output.
#pragma omp parallel for private(j, g, k)
for (i = 0; i < l.h; ++i) {
for (j = 0; j < l.w; ++j) {
for (g = 0; g < l.out_c; ++g)
{
int out_index = j + l.w*(i + l.h*(g + l.out_c*b));
float max = -FLT_MAX;
int max_i = -1;
// scan channels g, g+out_c, g+2*out_c, ... for the largest value
for (k = g; k < l.c; k += l.out_c)
{
int in_index = j + l.w*(i + l.h*(k + l.c*b));
float val = state.input[in_index];
max_i = (val > max) ? in_index : max_i;
max = (val > max) ? val : max;
}
l.output[out_index] = max;
if (l.indexes) l.indexes[out_index] = max_i;
}
}
}
}
return;
}
if (!state.train && l.stride_x == l.stride_y) {
// inference fast path (only valid for a single square stride)
forward_maxpool_layer_avx(state.input, l.output, l.indexes, l.size, l.w, l.h, l.out_w, l.out_h, l.c, l.pad, l.stride, l.batch);
}
else
{
int b, i, j, k, m, n;
// half-padding: the window origin can start up to pad/2 outside the input
int w_offset = -l.pad / 2;
int h_offset = -l.pad / 2;
int h = l.out_h;
int w = l.out_w;
int c = l.c;
for (b = 0; b < l.batch; ++b) {
for (k = 0; k < c; ++k) {
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
int out_index = j + w*(i + h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for (n = 0; n < l.size; ++n) {
for (m = 0; m < l.size; ++m) {
int cur_h = h_offset + i*l.stride_y + n;
int cur_w = w_offset + j*l.stride_x + m;
int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
// out-of-bounds window cells never win the max
int valid = (cur_h >= 0 && cur_h < l.h &&
cur_w >= 0 && cur_w < l.w);
float val = (valid != 0) ? state.input[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
l.output[out_index] = max;
if (l.indexes) l.indexes[out_index] = max_i;
}
}
}
}
}
if (l.antialiasing) {
// run the fixed blur convolution on l.output, then copy its result back
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.input = l.output;
forward_convolutional_layer(*(l.input_layer), s);
//simple_copy_ongpu(l.outputs*l.batch, l.output, l.input_antialiasing);
memcpy(l.output, l.input_layer->output, l.input_layer->outputs * l.input_layer->batch * sizeof(float));
}
}
// Backward pass: route each output gradient to the input element whose value
// won the max in the forward pass (stored in l.indexes).
//
// NOTE(review): with overlapping windows (stride < size) two outputs can
// share the same argmax index, so the unsynchronized += below can race
// across OpenMP threads — confirm whether this is an accepted approximation.
void backward_maxpool_layer(const maxpool_layer l, network_state state)
{
int i;
int h = l.out_h;
int w = l.out_w;
int c = l.out_c;
#pragma omp parallel for
for(i = 0; i < h*w*c*l.batch; ++i){
int index = l.indexes[i];
state.delta[index] += l.delta[i];
}
}
// Forward pass of local average pooling: each output is the mean of the
// in-bounds input values under its window (out-of-bounds padding cells are
// excluded from both the sum and the divisor).
void forward_local_avgpool_layer(const maxpool_layer l, network_state state)
{
int b, i, j, k, m, n;
// half-padding: window origin may start up to pad/2 outside the input
int w_offset = -l.pad / 2;
int h_offset = -l.pad / 2;
int h = l.out_h;
int w = l.out_w;
int c = l.c;
for (b = 0; b < l.batch; ++b) {
for (k = 0; k < c; ++k) {
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
int out_index = j + w*(i + h*(k + c*b));
float avg = 0;
// number of window cells that actually fall inside the input
int counter = 0;
for (n = 0; n < l.size; ++n) {
for (m = 0; m < l.size; ++m) {
int cur_h = h_offset + i*l.stride_y + n;
int cur_w = w_offset + j*l.stride_x + m;
int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
int valid = (cur_h >= 0 && cur_h < l.h &&
cur_w >= 0 && cur_w < l.w);
if (valid) {
counter++;
avg += state.input[index];
}
}
}
l.output[out_index] = avg / counter;
}
}
}
}
}
// Backward pass of local average pooling: distribute each output gradient
// uniformly over the in-bounds cells of its window.
//
// NOTE(review): the gradient is divided by the full window area size*size,
// while the forward pass divides by the count of valid (in-bounds) cells —
// at the borders these differ, so forward/backward are not exact adjoints.
// Confirm whether this mismatch is intentional.
void backward_local_avgpool_layer(const maxpool_layer l, network_state state)
{
int b, i, j, k, m, n;
int w_offset = -l.pad / 2;
int h_offset = -l.pad / 2;
int h = l.out_h;
int w = l.out_w;
int c = l.c;
for (b = 0; b < l.batch; ++b) {
for (k = 0; k < c; ++k) {
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
int out_index = j + w*(i + h*(k + c*b));
for (n = 0; n < l.size; ++n) {
for (m = 0; m < l.size; ++m) {
int cur_h = h_offset + i*l.stride_y + n;
int cur_w = w_offset + j*l.stride_x + m;
int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
int valid = (cur_h >= 0 && cur_h < l.h &&
cur_w >= 0 && cur_w < l.w);
if (valid) state.delta[index] += l.delta[out_index] / (l.size*l.size);
}
}
}
}
}
}
}
run.c |
#include "camera_utils.h"
#include "network_utils.h"
#include "model_types.h"
#include "const.def"
#include "load_model.h"
#include "image_utils.h"
#include "run_network.h"
#include "eval_results.h"
#include "finalise_output.h"
#include <stdio.h>
#include <stdlib.h>
#include "sds_lib.h"
#include <omp.h>
// --- Globals shared between the pipeline stages --------------------------
// The model and all image/feature buffers are sds_alloc'd (SDSoC contiguous
// memory) so they can be shared with the hardware-accelerated functions.
net_model * squeezeDet;
activation_t *image;//[IMG_HEIGHT][IMG_WIDTH][3];
activation_t *intermediate_feature_map_buf_1;//[MAX_FEATURE_MAP_SIZE];
activation_t *intermediate_feature_map_buf_2;//[MAX_FEATURE_MAP_SIZE];
int num_pred_boxes;
int boxes[TOP_N_DETECTION][4+1+1]; // bbox, class, probability
// number of frames to process before exiting
int proc_n_images=1000;
// timestamps for the per-stage timing printouts
double begin,end;
// Detection pipeline driver: load the SqueezeDet model, then for each frame
// grab + preprocess an image, run the network, evaluate/postprocess the
// detections, draw the boxes and send the image out. Each stage is timed
// with omp_get_wtime().
// NOTE(review): the sds_alloc return values are not checked for NULL —
// presumably allocation failure is considered fatal/unrecoverable here.
int main(){
image = (activation_t *)sds_alloc(IMG_HEIGHT*IMG_WIDTH*3* sizeof(activation_t));
intermediate_feature_map_buf_1 = (activation_t *)sds_alloc(MAX_FEATURE_MAP_SIZE* sizeof(activation_t));
intermediate_feature_map_buf_2 = (activation_t *)sds_alloc(MAX_FEATURE_MAP_SIZE* sizeof(activation_t));
//squeezeDet = (net_model *) xalloc(1,sizeof(net_model));
squeezeDet = (net_model *) sds_alloc(sizeof(net_model));
load_model(squeezeDet);
launch_cam(image);
setup_conn();
printf("--Model loading done-- \n");
//per image processing
while(proc_n_images>0)
{
//#pragma omp parallel num_threads(4)
{
//#pragma omp single
{
// stage 1: acquire a frame from the camera
begin = omp_get_wtime();
//load_image((activation_t*)image);
grab_image();
end = omp_get_wtime();
printf("imageloading lasts %2.6f s. \n", (end - begin));
// stage 2: preprocessing (in place on the image buffer)
begin = omp_get_wtime();
preprocessing((activation_t*)image);
end = omp_get_wtime();
printf("preprocessing lasts %2.6f s. \n", (end - begin));
printf("--Image loading and preprocessing done-- \n");
//clock_t begin = clock();
begin = omp_get_wtime();
}
// stage 3: CNN inference, ping-ponging between the two feature buffers
run_network(squeezeDet, (activation_t*)image, intermediate_feature_map_buf_1,
intermediate_feature_map_buf_2);
//#pragma omp single
{
end = omp_get_wtime();
printf("--CNN Comp done-- \n");
printf("Processing time for run_network is %2.6f s. \n", (end - begin));
// stage 4: decode network output into bounding boxes
begin = omp_get_wtime();
num_pred_boxes = eval_results(intermediate_feature_map_buf_1, (int *)boxes);
end = omp_get_wtime();
printf("finalising lasts is %2.6f s. \n", (end - begin));
printf("Numbers of detected boxes: %d \n\n",num_pred_boxes);
// stage 5: postprocess the image for display
begin = omp_get_wtime();
postprocessing((activation_t*)image);
end = omp_get_wtime();
printf("postprocessing lasts is %2.6f s. \n", (end - begin));
int juu;
for(juu=0;juu<num_pred_boxes;juu++){
printf("box number %d is x: %d,%d; y: %d,%d \n",juu,boxes[juu][0],boxes[juu][2],boxes[juu][1],boxes[juu][3]);
}
// stage 6: overlay the detections on the frame
begin = omp_get_wtime();
draw_boxes((activation_t*)image, (int*) boxes, num_pred_boxes);
end = omp_get_wtime();
printf(" drawing lasts is %2.6f s. \n", (end - begin));
// stage 7: ship the annotated frame out over the connection
begin = omp_get_wtime();
//save_image((activation_t*) image);
send_image((char *) image);
end = omp_get_wtime();
printf("saving lasts is %2.6f s. \n", (end - begin));
printf("image number: %d \n", 1000-proc_n_images);
proc_n_images--;
}
}
}
// teardown: close the connection/camera and release the shared buffers
close_conn();
end_cam();
sds_free(intermediate_feature_map_buf_1);
sds_free(intermediate_feature_map_buf_2);
sds_free(image);
}
|
ops.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#pragma once
#ifndef OPS_H_
#define OPS_H_
#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>
#define MIN 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define DOUBLE_PI_X X(2.0 * 3.14159265358979323846)
#define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#include <helpers/sharedmem.h>
#define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/isinf being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#define no_op_exec_special_accumulation_same_cuda
#define no_op_exec_special_accumulation_long_cuda
#define no_op_exec_special_any_cuda
#define no_op_exec_special_bool_cuda
#define no_op_exec_special_same_cuda
#define no_op_exec_special_accumulation_same_cuda
#endif
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
#ifdef _OPENMP
#pragma omp declare reduction(maxT : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minT : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#endif
namespace functions {
namespace indexreduce {
/// A (value, index) pair carried through index-reduction loops
/// (e.g. argmax/argmin style reductions).
template <typename T>
struct IndexValue {
T value;
Nd4jLong index;
_CUDA_HD IndexValue() = default;
// initializers listed in declaration order (value before index): members are
// always constructed in declaration order, and the previous ordering
// triggered -Wreorder warnings without changing behavior
_CUDA_HD IndexValue(const T val, const Nd4jLong ind): value(val), index(ind) {}
};
}
namespace summarystats {
template <typename T>
class SummaryStatsData;
}
}
namespace simdOps {
template <typename X, typename Y, typename Z>
class Add {
public:
    /// identity element for additive reductions
    op_def static X startingValue() {
        return static_cast<X>(0.f);
    }

    /// pairwise sum
    op_def static Z op(X d1, Y d2) {
        return static_cast<Z>(d1 + d2);
    }

    /// pairwise sum; the extra params are unused
    op_def static Z op(X d1, Y d2, Z *params) {
        return static_cast<Z>(d1 + d2);
    }

    /// unary form: plain cast-through
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }

    /// MetaOps form: add the scalar carried in params[0]
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(d1 + params[0]);
    }
};
template <typename X, typename Y>
class NewAdd {
public:
    /// pairwise sum evaluated in X's type; the extra params are unused
    op_def static X op(X d1, Y d2, X *params) {
        return d1 + d2;
    }
};
template <typename X, typename Y, typename Z>
class Subtract {
public:
    /// pairwise difference d1 - d2
    op_def static Z op(X d1, Y d2) {
        return static_cast<Z>(d1 - d2);
    }

    /// pairwise difference; the extra params are unused
    op_def static Z op(X d1, Y d2, Z *params) {
        return static_cast<Z>(d1 - d2);
    }

    /// unary form: plain cast-through
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }

    /// MetaOps form: subtract the scalar carried in params[0]
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(d1 - params[0]);
    }
};
template <typename X, typename Y, typename Z>
class SquaredSubtract {
public:
    /// squared difference (d1 - d2)^2, computed with a single multiply:
    /// cheaper than nd4j_pow with a runtime exponent of 2.f and numerically
    /// equivalent for an exact integer exponent
    op_def static Z op(X d1, Y d2) {
        auto diff = static_cast<Z>(d1 - d2);
        return diff * diff;
    }

    /// squared difference; the extra params are unused
    op_def static Z op(X d1, Y d2, Z *params) {
        auto diff = static_cast<Z>(d1 - d2);
        return diff * diff;
    }

    /// unary form: plain pass-through (no squaring)
    op_def static Z op(X d1) {
        return d1;
    }

    /// MetaOps form: squared difference against the scalar in params[0]
    op_def static Z op(X d1, Y *params) {
        auto diff = static_cast<Z>(d1 - params[0]);
        return diff * diff;
    }
};
template <typename X, typename Y, typename Z>
class ReverseSubtract {
public:
    /// reversed difference d2 - d1
    op_def static Z op(X d1, Y d2) {
        return static_cast<Z>(d2 - d1);
    }

    /// reversed difference; the extra params are unused
    op_def static Z op(X d1, Y d2, Z *params) {
        return static_cast<Z>(d2 - d1);
    }

    /// unary form: plain pass-through
    op_def static Z op(X d1) {
        return d1;
    }

    /// MetaOps form: subtract d1 from the scalar carried in params[0]
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(params[0] - d1);
    }
};
// Full log-Poisson loss: exp(c) - z*c + z*log(z) - z + 0.5*log(2*pi*z),
// i.e. including the Stirling approximation terms that plain LogPoisonLoss
// omits. z is the target, c the log-prediction.
template <typename X, typename Y, typename Z>
class LogPoisonLossFull {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
// unary form: only the Stirling terms (no prediction available)
// NOTE(review): this calls nd4j_log<Y, Z>(z) although z has type X — the
// binary overloads use nd4j_log<X, Z>; presumably this should be <X, Z> too.
op_def static Z op(X z) {
auto zz = static_cast<Z>(z);
return (zz * nd4j::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz));
}
// op for MetaOps
// NOTE(review): unlike the other overloads this one computes and returns in
// X rather than Z — confirm this asymmetry is intentional.
op_def static X op(X z, Y *params) {
return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z)));
}
};
// Log-Poisson loss without the Stirling correction: exp(c) - z*c,
// where z is the target and c the log-prediction.
template <typename X, typename Y, typename Z>
class LogPoisonLoss {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
// unary form: plain cast-through
op_def static Z op(X z) {
return static_cast<Z>(z);
}
// op for MetaOps: same loss against the scalar carried in params[0]
op_def static Z op(X z, Y *params) {
return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0]));
}
};
template <typename X, typename Y, typename Z>
class Multiply {
public:
    /// identity element for multiplicative reductions
    op_def static X startingValue() {
        return static_cast<X>(1.f);
    }

    /// pairwise product
    op_def static Z op(X d1, Y d2) {
        return static_cast<Z>(d1 * d2);
    }

    /// pairwise product; the extra params are unused
    op_def static Z op(X d1, Y d2, Z *params) {
        return static_cast<Z>(d1 * d2);
    }

    /// unary form: plain cast-through
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }

    /// MetaOps form: multiply by the scalar carried in params[0]
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(d1 * params[0]);
    }
};
template <typename X, typename Y, typename Z>
class Divide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 / params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1);
}
};
template <typename X, typename Y, typename Z>
// Division that maps x/0 to 0 instead of producing inf/NaN (or, for
// integer types, undefined behavior).
class SafeDivide {
public:
op_def static Z op(X d1, Y d2) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
// Unary passthrough.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
if(params[0] == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorDiv {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1));
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0]));
}
};
template <typename X, typename Y, typename Z>
// Integer (truncating) division: both operands are cast to int first, so
// fractional parts are discarded toward zero.
// NOTE(review): i2 == 0 is undefined behavior here — callers are expected
// to guarantee a non-zero divisor; confirm upstream contract.
class TruncateDiv {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
// Unary passthrough (relies on implicit X -> Z conversion).
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 / i2);
}
};
template <typename X, typename Y, typename Z>
// Integer (truncating) modulo: operands are cast to int, remainder takes
// the sign of the dividend (C++ % semantics).
// NOTE(review): i2 == 0 is undefined behavior here — confirm callers
// guarantee a non-zero divisor.
class TruncateMod {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
// Unary passthrough.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 % i2);
}
};
template<typename X, typename Y, typename Z>
class Remainder {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FMod {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
// Floored modulo (Python-style): when the operands have opposite signs the
// raw fmod result is shifted by d2 so the result takes the sign of the
// divisor. When signs agree, plain fmod already has the right sign.
class FloorMod {
public:
op_def static Z op(X d1, Y d2) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
// same-sign operands -> fmod result is already floored; otherwise add d2 and re-mod
return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
// Unary passthrough (relies on implicit X -> Z conversion).
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class ReverseDivide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] / d1);
}
};
template <typename X, typename Y, typename Z>
class CopyPws {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X>
class Copy {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class Copy2 {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Y, typename Z>
// a*x + y. The scale factor alpha comes from params[0] in the 3-arg form.
class Axpy {
public:
// NOTE(review): this 2-arg overload has no access to alpha and degenerates
// to plain addition (alpha implicitly 1) — confirm that is intended.
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 + d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto alpha = params[0];
return alpha * static_cast<Z>(d1) + static_cast<Z>(d2);
}
// Unary passthrough.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class Assign {
public:
no_op_exec_special_any
no_op_exec_special_any_cuda
op_def static Z op(X d1, X *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
// Logical AND. With extra params, "true" means "differs from the comparison
// value params[0]"; without params, plain boolean AND of the inputs.
class And {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// NOTE(review): this overload returns the SUM of the inputs, not a logical
// AND — it appears to be unused on the boolean execution path; confirm.
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
// both inputs "truthy" relative to the comparison value
return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
// Unary passthrough.
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
// 119 is the project-wide "not applicable" sentinel for MetaOps.
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
// Logical OR. With extra params, "true" means "differs from the comparison
// value params[0]"; without params, plain boolean OR of the inputs.
class Or {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// NOTE(review): this overload returns the SUM of the inputs, not a logical
// OR — it appears to be unused on the boolean execution path; confirm.
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
// Unary passthrough.
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
// 119 is the project-wide "not applicable" sentinel for MetaOps.
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
// Logical XOR. With extra params, truth is defined as "differs from the
// comparison value params[0]"; without params, boolean XOR of the inputs.
class Xor {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// NOTE(review): this overload returns the SUM of the inputs, not a logical
// XOR — it appears to be unused on the boolean execution path; confirm.
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
// exactly one of the operands equals the comparison value
return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
// Unary passthrough.
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Z>
// Logical negation. Note the overloads implement different predicates:
// the (d1, d2, params) form is effectively "not equal", while the
// (d1, params) transform form is boolean NOT of d1.
class Not {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// Pairwise form without params: constant false.
op_def static Z op(X d1, X d2) {
return static_cast<Z>(0);
}
// "not equal" of the two inputs.
op_def static Z op(X d1, X d2, X *params) {
return d1 != d2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
// this transform op should run only on boolean input
op_def static Z op(X d1, X *params) {
auto b1 = static_cast<bool>(d1);
return !b1;
}
};
template <typename X, typename Y, typename Z>
// Logical NAND of the two inputs: 1 unless both are non-zero.
class LogicalNot {
public:
    op_def static Z op(X d1, Y d2) {
        return !((int) d1 && (int) d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        // FIX: cast to the declared return type Z (was static_cast<X>,
        // forcing an extra implicit X -> Z conversion on return).
        return static_cast<Z>(!(static_cast<int>(d1) && static_cast<int>(d2)));
    }
    // Unary passthrough.
    op_def static Z op(X d1) {
        return d1;
    }
    // op for MetaOps
    op_def static Z op(X d1, Y *params) {
        // FIX: sentinel cast to Z, consistent with the other Logical* ops
        // (was static_cast<X> while the function returns Z).
        return static_cast<Z>(119);
    }
};
template <typename X, typename Y, typename Z>
class LogicalXor {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return (i1 | i2) &~ (i1 & i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalAnd {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) & static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(Y d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
// Bitwise-style logical OR: operands are truncated to int and OR-ed.
class LogicalOr {
public:
    op_def static Z op(X d1, Y d2) {
        return static_cast<int>(d1) | static_cast<int>(d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }
    // Unary passthrough.
    op_def static Z op(X d1) {
        return d1;
    }
    // op for MetaOps
    op_def static Z op(X d1, Y *params) {
        // FIX: sentinel cast to the declared return type Z (was
        // static_cast<X>, inconsistent with LogicalXor/LogicalAnd).
        return static_cast<Z>(119);
    }
};
template <typename X, typename Y, typename Z>
class Mod {
public:
/*
// just a optional note, feel free to remove later
op_def static half op(half d1, half d2, half *params) {
return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
}
*/
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) % static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class ReverseMod {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d2) % static_cast<int>(d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
/**
* Whether two elements in an array
* are epsilon-equal (absolute difference within the MIN threshold)
*/
template <typename X, typename Z>
class Epsilon {
public:
// Returns 1 when |d1 - d2| <= MIN, 0 otherwise.
// MIN is a project-defined epsilon macro — presumably the smallest
// meaningful difference for type X; confirm its definition upstream.
op_def static Z op(X d1, X d2) {
X diff = d1 - d2;
X absDiff = nd4j::math::nd4j_abs<X>(diff);
if (absDiff <= static_cast<X>(MIN))
return static_cast<Z>(1);
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// Unary passthrough.
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class EqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 == d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class NotEqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 != d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 >= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThan {
public:
op_def static Z op(X d1, X d2) {
return d1 > d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThan {
public:
op_def static Z op(X d1, X d2) {
return d1 < d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 <= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Abs {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_abs<X>(d1);
}
};
template <typename X>
class Ceiling {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_ceil<X,X>(d1);
}
};
template <typename X>
class Cosine {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cos<X,X>(d1);
}
};
template <typename X>
class Exp {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1);
}
};
template <typename X>
class HardTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? static_cast<X>(1.f) : static_cast<X>(0.f));
}
};
template <typename X>
class HardTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// Piecewise-linear tanh approximation: clamp the input to [-1, 1].
op_def static X op(X d1, X *params) {
const X lo = static_cast<X>(-1);
const X hi = static_cast<X>(1);
return d1 < lo ? lo : (d1 > hi ? hi : d1);
}
};
template <typename X>
class Floor {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_floor<X,X>(d1);
}
};
template <typename X>
class Log {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(d1);
}
};
template <typename X>
class Log1p {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(1 + d1);
}
};
template <typename X, typename Y, typename Z>
class LogX {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2) ;
}
};
template <typename X>
class StabilizeFP16 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return static_cast<X>(nd4j::DataTypeUtils::min<float16>());
else return d1;
}
};
template <typename X>
class StabilizeX {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return nd4j::DataTypeUtils::min<X>();
else return d1;
}
};
template <typename X>
class SpecialDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1.f) - d1);
}
};
template <typename X>
class Neg {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return -d1;
}
};
template <typename X>
class Erf {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erf<X,X>(d1);
}
};
template <typename X>
class Erfc {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erfc<X,X>(d1);
}
};
template <typename X>
class Reciprocal {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op_def static T op(T d1) {
// return (T(1.0f) / d1);
// }
// op for MetaOps
op_def static X op(X d1, X *params) {
return (static_cast<X>(1) / d1);
}
};
template <typename X, typename Z>
class Sqr {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
};
template <typename X, typename Y, typename Z>
class RelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_re<X>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X threshold = params[0];
return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
// Flags (returns 1 for) element pairs whose relative error exceeds the
// relative threshold AND whose absolute difference is at least the
// absolute threshold; otherwise 0.
class BinaryMinimumAbsoluteRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
// Unary form: comparison value and both thresholds come from params.
// params: [0] = d2 (comparison value), [1] = relative threshold,
// [2] = absolute threshold.
op_def static Z op(X d1, X *params) {
X d2 = params[0];
X thresholdRelative = params[1];
X thresholdAbsolute = params[2];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
// Pairwise form: params holds only the two thresholds.
op_def static Z op(X d1, Y d2, Z *params) {
X thresholdRelative = params[0];
X thresholdAbsolute = params[1];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class Pow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]);
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class PowDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X>
class Round {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_round<X,X>(d1);
}
};
template <typename X, typename Z>
// Per-element NaN test (1 if NaN, 0 otherwise). As a reduction, merge/update
// sum the per-element results, so it counts the NaNs in the input.
class IsNan {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<X>(1) : static_cast<X>(0);
}
// Reduction seed: zero NaNs seen.
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
// Final count needs no normalization.
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class Expm1 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1);
}
};
template <typename X, typename Z>
class IsPositive {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return d1 > (X)0.f;
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInf {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isinf<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInfOrNan{
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsFinite {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
// Clamp d1 into [params[0], params[1]] (min, max).
class ClipByValue {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 > params[1])
return params[1];
if (d1 < params[0])
return params[0];
return d1;
}
};
template <typename X, typename Y, typename Z>
// Symmetric clipping used for LSTM cell-state values: clamps d1 into
// [-d2, d2], where d2 is the (non-negative) clipping threshold.
class LstmClip {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    op_def static Z op(X d1, Y d2, Z *params) {
        X _v = (X) d2;
        if (d1 > _v)
            return _v;
        // BUG FIX: the lower branch tested `d1 < _v` and returned +_v,
        // which collapsed almost every input to +_v. The correct symmetric
        // clip tests against -_v and returns -_v.
        else if (d1 < -_v)
            return -_v;
        else return d1;
    }
};
template <typename X>
class Swish {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1);
}
};
template <typename X>
// Derivative of swish(x) = x * sigmoid(x):
// swish'(x) = e^x * (x + e^x + 1) / (e^x + 1)^2.
class SwishDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
// e^x computed as pow(e, x) — presumably to reuse the pow intrinsic;
// equivalent to nd4j_exp.
X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
}
};
template <typename X>
class LogSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1));
}
};
template <typename X>
class LogSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(M_E, d1);
return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
}
};
template <typename X>
class Sigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoid<X, X>(d1);
}
};
template <typename X>
class SigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
}
};
template <typename X>
class HardSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
}
};
template <typename X>
class HardSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
}
};
/**
* Scale to be between a min and max
*/
template <typename X>
class SetRange {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto min = params[0];
auto max = params[1];
if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
return d1;
if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
}
return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
}
};
template <typename X>
class Sin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sin<X,X>(d1);
}
};
template <typename X>
class Square {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1;
}
};
template <typename X, typename Z>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X, typename Z>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X>
class Rint {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_rint<X,X>(d1);
}
};
template <typename X>
class SoftPlus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::softplus<X, X>(d1);
}
};
template <typename X>
class Sign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
}
};
template <typename X>
class TimesOneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1) - d1);
}
};
template <typename X>
// Rational polynomial approximation of a scaled tanh:
// f(x) = 1.7159 * sgn(2x/3) * (1 - 1 / (1 + |2x/3| + (2x/3)^2 + 1.41645*(2x/3)^4)).
class RationalTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
// keep 2/3 as runtime variable, to match precision
auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;
auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
return static_cast<X>(1.7159f) * tanh;
}
};
template <typename X>
class RationalTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;
auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));
auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);
return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
}
};
template <typename X>
class Tanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanh<X, X>(d1);
}
};
template <typename X>
class RectifiedTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
}
};
template <typename X>
class RectifiedTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
}
};
template <typename X>
class ATanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atanh<X,X>(d1);
}
};
template <typename X>
class TanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanhderivative<X,X>(d1);
}
};
template <typename X>
class Cube {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1 * d1;
}
};
template <typename X>
class CubeDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(3) * d1 * d1;
}
};
template <typename X>
class ACos {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acos<X, X>(d1);
}
};
template <typename X>
class ASinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asinh<X, X>(d1);
}
};
template <typename X>
class ASinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
}
};
template <typename X>
class ACosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acosh<X, X>(d1);
}
};
template <typename X>
class ACoshDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
}
};
template <typename X>
class Ones {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.0f);
}
};
template <typename X>
class SoftSign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsign<X, X>(d1);
}
};
template <typename X>
class SoftSignDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsignderivative<X,X>(d1);
}
};
template <typename X, typename Z>
class MatchConditionBool {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// Returns 1 when the configured condition holds for d1, 0 otherwise.
// extraParams: [0] comparison value, [1] epsilon tolerance, [2] mode code.
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
switch (mode) {
case 0: // equals (within eps)
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps;
case 2: // less_than
return d1 < compare;
case 3: // greater_than
return d1 > compare;
case 4: // less_or_equals_than
return d1 <= compare;
case 5: // greater_or_equals_than
return d1 >= compare;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1);
case 9: // is nan
return nd4j::math::nd4j_isnan(d1);
case 10: // exact equals
return d1 == compare;
case 11: // exact not equals
return d1 != compare;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare;
case 14: // isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
case 15: // isInfinite (inf or nan)
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
default:
printf("Undefined match condition: [%i]\n", mode);
}
// Unknown mode: fall through with the raw value, matching legacy behavior.
return d1;
}
};
template <typename X, typename Z>
class MatchCondition {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    no_op_exec_special_accumulation_long
    no_op_exec_special_accumulation_cuda

    op_def static Z startingValue(const X *input) {
        return static_cast<Z>(0);
    }

    // Matches are counted by summation.
    op_def static Z merge(Z old, Z opOutput, X *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(Z old, Z opOutput, X *extraParams) {
        return old + opOutput;
    }

    // Returns 1 if the condition encoded in extraParams holds for d1, else 0:
    //   extraParams[0] = comparison value
    //   extraParams[1] = epsilon tolerance
    //   extraParams[2] = condition mode
    // Idiom fix: `cond ? 1 : 0` replaced with an explicit cast of the boolean
    // (bool converts to exactly 1/0, so behavior is unchanged).
    op_def static Z op(X d1, X *extraParams) {
        X compare = extraParams[0];
        X eps = extraParams[1];
        auto mode = static_cast<int>(extraParams[2]);
        switch (mode) {
            case 0: // equals
                return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1 - compare) <= eps);
            case 1: // not equals
                return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1 - compare) > eps);
            case 2: // less_than
                return static_cast<Z>(d1 < compare);
            case 3: // greater_than
                return static_cast<Z>(d1 > compare);
            case 4: // less_or_equals_than
                return static_cast<Z>(d1 <= compare);
            case 5: // greater_or_equals_than
                return static_cast<Z>(d1 >= compare);
            case 6: // abs_less_than
                return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1) < compare);
            case 7: // abs_greater_than
                return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1) > compare);
            case 8: // is inf
                return static_cast<Z>(nd4j::math::nd4j_isinf(d1));
            case 9: // is nan
                return static_cast<Z>(nd4j::math::nd4j_isnan(d1));
            case 10: // equals (exact)
                return static_cast<Z>(d1 == compare);
            case 11: // not equals (exact)
                return static_cast<Z>(d1 != compare);
            case 12: // abs_greater_or_equals_than
                return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1) >= compare);
            case 13: // abs_less_or_equals_than
                return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1) <= compare);
            case 14: // isFinite
                return static_cast<Z>(!(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)));
            case 15: // isInfinite
                return static_cast<Z>(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
            default:
                printf("Undefined match condition: [%i]\n", mode);
        }
        // Unknown mode: fall through with the raw input, as before.
        return d1;
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};
template <typename X>
class ELU {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Exponential linear unit activation.
    op_def static X op(X d1, X *params) {
        const X activated = nd4j::math::nd4j_elu<X, X>(d1);
        return activated;
    }
};

template <typename X>
class ELUDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Derivative of ELU, used on the backward pass.
    op_def static X op(X d1, X *params) {
        const X grad = nd4j::math::nd4j_eluderivative<X, X>(d1);
        return grad;
    }
};
template <typename X, typename Y, typename Z>
class RELU {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Rectifier with configurable floor: returns max(d1, d2) in the output type.
    op_def static Z op(X d1, Y d2, Z *params) {
        auto value = static_cast<Z>(d1);
        auto floorValue = static_cast<Z>(d2);
        return value < floorValue ? floorValue : value;
    }
};
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
    // Label smoothing for sigmoid cross-entropy targets:
    //   d1 * (1 - d2) + 0.5 * d2
    // where d2 is the smoothing factor.
    // Idiom fix: C-style `(X)` casts replaced with static_cast; behavior unchanged.
    op_def static Z op(X d1, Y d2, Z *params) {
        return d1 * (static_cast<X>(1.f) - static_cast<X>(d2)) + static_cast<X>(0.5f) * static_cast<X>(d2);
    }
};
template <typename X, typename Y, typename Z>
class RELU6 {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // ReLU capped at 6: min(RELU(d1, d2), 6).
    op_def static Z op(X d1, Y d2, Z *params) {
        auto rectified = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
        const auto cap = static_cast<Z>(6);
        return rectified < cap ? rectified : cap;
    }
};
template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    // Leaky ReLU with negative-side slope d2.
    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_leakyrelu<X,Z>(d1, d2);
    }
};

template <typename X>
class SELU {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Scaled exponential linear unit:
    //   lambda * x                   when x > 0
    //   lambda * alpha * (e^x - 1)   otherwise
    op_def static X op(X d1, X *params) {
        const X lambda = static_cast<X>(SELU_LAMBDA);
        const X alpha = static_cast<X>(SELU_ALPHA);
        if (d1 > static_cast<X>(0.0f))
            return lambda * static_cast<X>(d1);
        return lambda * (alpha * nd4j::math::nd4j_exp<X, X>(d1) - alpha);
    }
};
template <typename X>
class SELUDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // d/dx SELU: lambda on the positive side, lambda * alpha * e^x otherwise.
    op_def static X op(X d1, X *params) {
        const X lambda = static_cast<X>(SELU_LAMBDA);
        if (d1 > static_cast<X>(0.f))
            return lambda;
        return static_cast<X>(SELU_ALPHA) * lambda * nd4j::math::nd4j_exp<X, X>(d1);
    }
};

template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    // Gradient of leaky ReLU: 1 for non-negative input, the slope d2 otherwise.
    op_def static Z op(X d1, Y d2, Z *params) {
        return d1 >= static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(d2);
    }
};
template <typename X>
class ASin {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Inverse sine, element-wise.
    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_asin<X, X>(d1);
    }
};

template <typename X>
class Sinh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Hyperbolic sine, element-wise.
    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_sinh<X, X>(d1);
    }
};

template <typename X>
class SinhDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // d/dx sinh(x) = cosh(x).
    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_cosh<X, X>(d1);
    }
};

template <typename X>
class Cosh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Hyperbolic cosine, element-wise.
    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_cosh<X, X>(d1);
    }
};

template <typename X>
class Tan {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Tangent, element-wise.
    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_tan<X, X>(d1);
    }
};
template <typename X>
class TanDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // d/dx tan(x) = sec^2(x) = 1 / cos^2(x).
    // Perf fix: the previous nd4j_pow(cos(x), 2) call is replaced with a
    // single multiplication — the general pow routine is needless for a
    // fixed small integer exponent.
    op_def static X op(X d1, X *params) {
        const X c = nd4j::math::nd4j_cos<X, X>(d1);
        return static_cast<X>(1.f) / (c * c);
    }
};
template <typename X>
class ATan {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Inverse tangent, element-wise.
    op_def static X op(X d1, X *params) {
        const X result = nd4j::math::nd4j_atan<X, X>(d1);
        return result;
    }
};
template <typename X, typename Y, typename Z>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
// Two-argument arctangent.
// NOTE(review): the arguments are deliberately swapped when forwarded
// (d2 first, d1 second) — presumably so the call matches the usual
// atan2(y, x) convention with d2 as y; confirm against callers before
// changing the order.
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X>
class Identity {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Pass-through op: the output equals the input.
    op_def static X op(X d1, X *params) {
        return d1;
    }
};
template <typename X>
class Stabilize {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// Limits d1 so that d1 * k stays within [MIN_CUTFOFF, -MIN_CUTFOFF],
// returning the bound divided back by k when exceeded.
// NOTE(review): this only acts as a clamp if MIN_CUTFOFF is negative
// (making -MIN_CUTFOFF the positive bound) — confirm the macro's value
// before touching this logic.
op_def static X op(X d1, X *params) {
X k = params[0]; // scaling factor supplied by the caller
if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
return static_cast<X>(- MIN_CUTFOFF) / k;
else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
return static_cast<X>(MIN_CUTFOFF) / k;
return d1;
}
};
template <typename X, typename Y, typename Z>
class Step {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Heaviside-style step with threshold d2: 1 above it, 0 otherwise.
    op_def static Z op(X d1, Y d2, Z *params) {
        if (d1 > static_cast<X>(d2))
            return static_cast<Z>(1);
        return static_cast<Z>(0);
    }
};

template <typename X>
class OneMinus {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Computes 1 - x, element-wise.
    op_def static X op(X d1, X *params) {
        return static_cast<X>(1) - d1;
    }
};
template <typename X>
class Sum {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    // Plain summation reduce; the identity element is zero.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    // Partial results combine by addition.
    op_def static X merge(X old, X opOutput, X *extraParams) {
        return old + opOutput;
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return old + opOutput;
    }

    // Each element contributes itself.
    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    // No normalization after the reduction.
    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};
template <typename X, typename Z>
class ShannonEntropy {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return opOutput + old;
    }

    // Per-element contribution p^2 * log(p^2).
    // Perf fix: the square is computed once and reused; the original
    // evaluated nd4j_pow twice per element.
    op_def static Z op(X d1, Z *extraParams) {
        Z squared = nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
        return squared * nd4j::math::nd4j_log<X, Z>(squared);
    }

    // Entropy carries a leading minus sign.
    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        return -reduction;
    }
};
template <typename X, typename Z>
class LogEntropy {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Per-element p * log(p).
    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        // entropy is -sum(p(x) * log(p(x))); log-entropy is the log of that
        return nd4j::math::nd4j_log<X, Z>(-reduction);
    }
};
template <typename X, typename Z>
class Entropy {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Per-element p * log(p).
    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
    }

    // entropy is -sum(p(x) * log(p(x)))
    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        return static_cast<Z>(-reduction);
    }
};
template <typename X>
class ASum {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    // Sum of absolute values.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(old) + nd4j::math::nd4j_abs<X>(opOutput);
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(old) + nd4j::math::nd4j_abs<X>(opOutput);
    }

    // Each element contributes its magnitude.
    op_def static X op(X d1, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(d1);
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(reduction);
    }
};
template <typename X, typename Z>
class CountNonZero {
public:
    no_op_exec_special_accumulation_long
    no_op_exec_special_accumulation_cuda

    // Counts elements that differ from zero.
    op_def static Z startingValue(const X *input) {
        return static_cast<Z>(0);
    }

    op_def static Z merge(Z old, Z opOutput, X *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(Z old, Z opOutput, X *extraParams) {
        return old + opOutput;
    }

    // 1 for a non-zero element, 0 for a zero one.
    op_def static Z op(X d1, X *extraParams) {
        if (d1 == static_cast<X>(0.0f))
            return static_cast<Z>(0.0f);
        return static_cast<Z>(1.0f);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};
template <typename X, typename Z>
class CountZero {
public:
    no_op_exec_special_accumulation_long
    no_op_exec_special_accumulation_cuda

    // Counts elements equal to zero.
    op_def static Z startingValue(const X *input) {
        return static_cast<Z>(0.0f);
    }

    op_def static Z merge(Z old, Z opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, X *extraParams) {
        return opOutput + old;
    }

    // 1 for a zero element, 0 otherwise.
    // Fix: the literals were cast to X although the function returns Z
    // (the sibling CountNonZero casts to Z); the result now converts once
    // instead of X -> Z implicitly.
    op_def static Z op(X d1, X *extraParams) {
        return d1 == static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return static_cast<Z>(reduction);
    }
};
template <typename X>
class Prod {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    // Product reduce; the identity element is one.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(1);
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return old * opOutput;
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return old * opOutput;
    }

    // Each element contributes itself.
    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};
template <typename X, typename Z>
class Any {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // Logical "any": sums contributions, then checks the total is positive.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static Z merge(X old, X opOutput, X *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, X *extraParams) {
        return old + opOutput;
    }

    op_def static Z op(X d1, X *extraParams) {
        return d1;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
        if (reduction > static_cast<X>(0))
            return static_cast<Z>(1);
        return static_cast<Z>(0);
    }
};
template <typename X, typename Z>
class All {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // Logical "all": multiplies contributions, so any zero zeroes the total.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(1);
    }

    op_def static Z merge(X old, X opOutput, X *extraParams) {
        return old * opOutput;
    }

    op_def static Z update(X old, X opOutput, X *extraParams) {
        return old * opOutput;
    }

    op_def static Z op(X d1, X *extraParams) {
        return d1;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
        if (reduction > static_cast<X>(0))
            return static_cast<Z>(1);
        return static_cast<Z>(0);
    }
};
template <typename X, typename Z>
class Mean {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return d1;
    }

    // Arithmetic mean: sum divided by the element count.
    // Idiom fix: C-style `(Z)` casts replaced with static_cast; behavior unchanged.
    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        return static_cast<Z>(reduction) / static_cast<Z>(n);
    }
};
template <typename X, typename Z>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
// Mean of absolute values: sum(|x|) / n.
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
// NOTE(review): declared to return X while the sibling merge/postProcess
// return Z — looks like a typo for Z; harmless only when X converts to Z.
// Confirm before changing the signature.
op_def static X update(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction) / static_cast<X>(n);
}
};
template <typename X>
class Max {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    // Maximum reduce, seeded with the first element.
    op_def static X startingValue(const X *input) {
        return input[0];
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(old, opOutput);
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(opOutput, old);
    }

    // Pairwise form used when combining two inputs.
    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_max<X>(d1, d2);
    }

    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_max<X>(d1, d2);
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};
template <typename X, typename Y, typename Z>
class AMaxPairwise {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    // Returns whichever operand has the larger magnitude (sign preserved).
    op_def static Z op(X d1, Y d2) {
        auto lhs = static_cast<Z>(d1);
        auto rhs = static_cast<Z>(d2);
        return nd4j::math::nd4j_abs<Z>(lhs) > nd4j::math::nd4j_abs<Z>(rhs) ? lhs : rhs;
    }
};

template <typename X, typename Y, typename Z>
class AMinPairwise {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    // Returns whichever operand has the smaller magnitude (sign preserved).
    op_def static Z op(X d1, Y d2) {
        auto lhs = static_cast<Z>(d1);
        auto rhs = static_cast<Z>(d2);
        return nd4j::math::nd4j_abs<Z>(lhs) < nd4j::math::nd4j_abs<Z>(rhs) ? lhs : rhs;
    }
};
template <typename X, typename Y, typename Z>
class MaxPairwise {
public:
    // Element-wise maximum of two inputs in the output type.
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
    }
};

template <typename X, typename Y, typename Z>
class MinPairwise {
public:
    // Element-wise minimum of two inputs in the output type.
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
    }
};
template <typename X>
class AMax {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    // Maximum of absolute values, seeded with the first element.
    op_def static X startingValue(const X *input) {
        return input[0];
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
    }

    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
    }

    // Two-arg form keeps the sign of the winning operand.
    op_def static X op(X d1, X d2) {
        if (nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2))
            return d1;
        return d2;
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static X op(X d1, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(d1);
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(reduction);
    }
};
template <typename X>
class AMin {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    // Minimum of absolute values, seeded with the first element.
    op_def static X startingValue(const X *input) {
        return input[0];
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
    }

    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
    }

    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static X op(X d1, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(d1);
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(reduction);
    }
};
template <typename X>
class Min {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    // Minimum reduce, seeded with the first element.
    op_def static X startingValue(const X *input) {
        return input[0];
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(old, opOutput);
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(opOutput, old);
    }

    // Pairwise form used when combining two inputs.
    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_min<X>(d1, d2);
    }

    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_min<X>(d1, d2);
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};
template <typename X, typename Z>
class Norm1 {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // L1 norm: sum of absolute values.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1));
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        return static_cast<Z>(reduction);
    }
};
template <typename X, typename Z>
class Norm2 {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // L2 norm: sqrt of the sum of squares.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Each element contributes its square.
    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1 * d1);
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_sqrt<X, Z>(reduction);
    }
};
template <typename X, typename Z>
class SquaredNorm {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // Sum of squares without the final square root.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1 * d1);
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        return static_cast<Z>(reduction);
    }
};
template <typename X, typename Z>
class NormFrobenius {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // Frobenius norm: sqrt of the sum of squared magnitudes.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z op(X d1, Z *extraParams) {
        X magnitude = nd4j::math::nd4j_abs<X>(d1);
        return static_cast<Z>(magnitude * magnitude);
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_sqrt<X, Z>(reduction);
    }
};
template <typename X, typename Z>
class NormP {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // Generalized p-norm; the exponent p is read from extraParams[0].
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // |x|^p per element.
    op_def static Z op(X d1, Z *extraParams) {
        return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]);
    }

    // Final 1/p root over the accumulated sum.
    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_pow<X, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]);
    }
};
template <typename X, typename Z>
class NormMax {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    // NOTE(review): merge sums while update takes a max — kept as-is to
    // preserve existing behavior; confirm which combiner callers rely on.
    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old),
                nd4j::math::nd4j_abs<X>(opOutput));
    }

    op_def static Z op(X d1, Z *extraParams) {
        return d1;
    }

    // Fix: the original computed max(|reduction|, |reduction|) — the max of
    // two identical values — which is just |reduction|.
    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        return static_cast<Z>(nd4j::math::nd4j_abs<X>(reduction));
    }
};
template <typename X, typename Z>
class Variance {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
// Squared deviation from the pre-computed mean, which the caller
// supplies in extraParams[0].
op_def static X op(X d1, Z *extraParams) {
X mean = static_cast<X>(extraParams[0]);
X ret = d1 - mean;
return ret * ret;
}
// Sample variance: divides by (n - 1), i.e. applies Bessel's correction.
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
// T bias = extraParams[1];
// return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
}
};
/**
* Standard deviation of a buffer
*/
template <typename X, typename Z>
class StandardDeviation {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // Accumulates squared deviations from the caller-supplied mean
    // (extraParams[0]) and finishes with the square root of the variance.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        X mean = extraParams[0];
        X deviation = d1 - mean;
        return deviation * deviation;
    }

    // sqrt of the sample variance (delegates the (n - 1) division).
    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        Z variance = Variance<X,Z>::postProcess(reduction, n, extraParams);
        return nd4j::math::nd4j_sqrt<X, Z>(variance);
    }
};
// cos(a, b) = dot(a, b) / (||a|| * ||b||). The two squared norms are
// accumulated as side effects in extraParams[0] / extraParams[1].
template <typename X, typename Y>
class CosineSimilarity {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
// Dot product divided by the product of the two vector norms.
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]));
}
// Contributes d1*d2 to the dot product and d1^2 / d2^2 to the norms.
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(d1 * d1);
extraParams[1] += static_cast<Y>(d2 * d2);
return static_cast<Y>(d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
// CUDA path: the norm accumulation must be atomic because extraParams is
// shared between threads.
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2));
return static_cast<Y>(d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
// Jaccard distance: 1 - sum(min(a_i, b_i)) / sum(max(a_i, b_i)).
// The numerator/denominator are accumulated in extraParams[0] / [1].
template <typename X, typename Y>
class JaccardDistance {
public:
    static const int extraParamsLen = 2;

    op_def static X *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParams) {
        //delete[] extraParams;
    }

    // Fix: the zero literal was cast to X although the function returns Y
    // (every sibling op casts its starting value to the return type).
    op_def static Y startingValue(const X *input) {
        return static_cast<Y>(0.0f);
    }

    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
        // num / denom
        return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]);
    }

    op_def static Y num(X d1, X d2) {
        return nd4j::math::nd4j_min<X>(d1, d2);
    }

    op_def static Y denom(X d1, X d2) {
        return nd4j::math::nd4j_max<X>(d1, d2);
    }

    // Accumulates numerator and denominator; the per-pair result is 0.
    op_def static Y op(X d1, X d2, Y *extraParams) {
        extraParams[0] += static_cast<Y>(num(d1, d2));
        extraParams[1] += static_cast<Y>(denom(d1, d2));
        return static_cast<Y>(0.0f);
    }

    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    __device__
    static inline Y opAtomic(X d1, X d2, Y *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
        return static_cast<Y>(0.0f);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *extraParams) {
        return old + opOutput;
    }

    op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
        return update(old, opOutput, extraParams);
    }
};
// Fraction of positions where the two inputs differ (normalized Hamming
// distance): per-pair mismatch flags are summed, then divided by n.
template <typename X, typename Y>
class SimpleHammingDistance {
public:
static const int extraParamsLen = 0;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
// NOTE(review): `reduction / n` divides a Y by an Nd4jLong before the
// cast — with an integral Y this truncates; presumably Y is floating
// point here. Confirm.
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return static_cast<Y>(reduction / n);
}
// 1 when the pair differs (exact comparison), 0 when equal.
op_def static Y op(X d1, X d2, Y *extraParams) {
return (d1 == d2) ? static_cast<Y>(0.0f) : static_cast<Y>(1.0f);
}
op_def static void aggregateExtraParams(X *extraParamsTotal, X *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
return op(d1, d2, extraParams);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
// Cosine distance: 1 - cosine similarity. Squared norms accumulate in
// extraParams[0] / extraParams[1] as a side effect of op().
template <typename X, typename Y>
class CosineDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1));
extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2));
return (d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
// NOTE(review): here nd4j_abs is instantiated with Y although d1/d2 are
// of type X (the host path above uses nd4j_abs<X>) — looks inconsistent;
// confirm whether the implicit X -> Y conversion is intended.
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<Y>(d1) * nd4j::math::nd4j_abs<Y>(d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<Y>(d2) * nd4j::math::nd4j_abs<Y>(d2));
return (d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
/**
* Dot product between 2 arrays
*/
// Reduce3 op computing the dot product: sums d1 * d2 over all pairs.
template <typename X, typename Y>
class Dot {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
//delete[] * extraParamsRef;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
// The summed products need no normalization.
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
// Per-pair contribution to the dot product.
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return static_cast<Y>(d1 * d2);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
/**
* Op to check equality within arrays
*/
template <typename X, typename Z>
class EqualsWithEps {
public:
// NOTE(review): extraParamsLen is declared 0, yet op() reads
// extraParamsRef[2] — presumably the eps is passed through a different
// buffer than the one this length describes; confirm with the callers.
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Z startingValue(X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) {
return reduction;
}
// Returns 0 when d1 and d2 are equal within eps, 1 otherwise, combining
// an absolute-difference check with a relative (Knuth-style) check.
op_def static Z op(X d1, X d2, Z *extraParamsRef) {
Z eps = nd4j::math::nd4j_abs<Z>(extraParamsRef[2]);
Z diff = static_cast<Z>(nd4j::math::nd4j_abs<X>(d1 - d2));
// works well except in the range of very large numbers
if (diff <= eps)
return static_cast<Z>(0.f);
// Knuth approach
// works well except in the range of very small numbers
if (diff <= nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(static_cast<Z>(d1)), nd4j::math::nd4j_abs<Z>(static_cast<Z>(d2))) * eps)
return static_cast<Z>(0.f);
return static_cast<Z>(1.f);
}
#ifdef __CUDACC__
__device__
static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) {
return opOutput + old;
}
// NOTE(review): `old` is typed X here while update() takes Z — looks
// like a typo for Z; harmless only when X converts to Z. Confirm.
op_def static Z merge(X old, Z opOutput, Z *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {}
};
// Euclidean (L2) distance: sqrt of the summed squared differences.
template <typename X, typename Y>
class EuclideanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
// Final square root over the accumulated sum of squares.
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return nd4j::math::nd4j_sqrt<Y, Y>(reduction);
}
// Per-pair squared difference.
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
X ret = d1 - d2;
return static_cast<Y>(ret * ret);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
template <typename X, typename Y>
// Reduce3 op: Manhattan (L1) distance — sum of absolute element differences.
class ManhattanDistance {
public:
    static const int extraParamsLen = 0;

    op_def static X * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParamsRef) {
        //no-op
    }

    // Sum accumulation starts at zero.
    op_def static Y startingValue(X *input) {
        return static_cast<Y>(0.0f);
    }

    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
        return reduction;
    }

    // |d1 - d2| for one element pair (computed in X, widened to Y on return).
    op_def static Y op(X d1, X d2, Y *extraParamsRef) {
        return nd4j::math::nd4j_abs<X>(d1 - d2);
    }

    op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
        return old + opOutput;
    }

    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
    }

#ifdef __CUDACC__
    __device__
    static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
    // BUGFIX: merge() was declared entirely in the input type X
    // (merge(X, X, X*)), funneling the Y-typed accumulator through a lossy
    // conversion before calling update(). The sibling reduce3 ops declare
    // merge() in the accumulation type Y.
    op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }
};
template <typename X>
// Index-reduce op: position of the element with the largest absolute value.
class IndexAbsoluteMax {
public:
// NOTE(review): this passes the whole IndexValue<X> to nd4j_abs<X>, which is
// parameterized on X — sibling ops take the abs of val.value instead; confirm
// the intended conversion here.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return nd4j::math::nd4j_abs<X>(val);
}
// Keeps whichever candidate has the larger |value|; ties prefer the lower
// index on the CUDA path. Note both arguments are mutated to their abs.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value > old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
// NOTE(review): returns f2 when |f1.value| is the LARGER of the two — the
// opposite of update()'s preference; confirm whether this inversion is
// intentional before relying on merge().
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
// Pass-through: the reduced IndexValue is already the final answer.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
// Seed value 0: any element with non-zero magnitude beats it.
static _CUDA_HD inline X startingValue(X *input) {
return 0;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
// Index-reduce op: smallest index whose value satisfies the MatchCondition
// predicate encoded in extraParams. index == -1 means "no match seen yet".
class FirstIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
// Candidate must satisfy the match predicate to be considered at all.
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
//printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
if (res == static_cast<X>(0))
return old;
// First real match wins; thereafter keep the smaller index.
if (old.index < 0)
return opOutput;
if (old.index > opOutput.index)
return opOutput;
return old;
}
// Seed value below any real input; seed index -1 = "none".
static _CUDA_HD inline X startingValue(X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
// Prefers the smaller index of the two partials.
// NOTE(review): unlike update(), this does not re-check the predicate or the
// -1 sentinel — confirm partials always carry valid indices at merge time.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index > f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
// Index-reduce op: LARGEST index whose value satisfies the MatchCondition
// predicate encoded in extraParams. index == -1 means "no match seen yet".
class LastIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
// Candidate must satisfy the match predicate to be considered at all.
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
if (res == static_cast<X>(0))
return old;
// First real match wins; thereafter keep the LARGER index.
if (old.index < 0)
return opOutput;
if (old.index < opOutput.index)
return opOutput;
return old;
}
// Seed value below any real input; seed index -1 = "none".
static _CUDA_HD inline X startingValue(X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
// Prefers the larger index of the two partials.
// NOTE(review): unlike update(), this does not re-check the predicate or the
// -1 sentinel — confirm partials always carry valid indices at merge time.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index < f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
// Index-reduce op: position of the maximum value (argmax).
class IndexMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
// Keeps the larger value; ties prefer the lower index on the CUDA path.
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value > old.value) {
return opOutput;
}
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
// NOTE(review): returns f2 when f1.value is the LARGER of the two — the
// opposite of update()'s preference; confirm whether this inversion is
// intentional before relying on merge().
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value > f2.value)
return f2;
return f1;
}
// Pass-through: the reduced IndexValue is already the final answer.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
// Seed below any real input so the first element always wins.
static _CUDA_HD inline X startingValue(X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
// Index-reduce op: position of the element with the smallest absolute value.
class IndexAbsoluteMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
// Seed at the type's maximum so any real |element| beats it.
static _CUDA_HD inline X startingValue(X *input) {
return nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
// Keeps whichever candidate has the smaller |value|; ties prefer the lower
// index on the CUDA path. Note both arguments are mutated to their abs.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
// NOTE(review): returns f2 when |f1.value| is the SMALLER of the two — the
// opposite of update()'s preference; confirm whether this inversion is
// intentional before relying on merge().
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
// Pass-through: the reduced IndexValue is already the final answer.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
// Index-reduce op: position of the minimum value (argmin).
class IndexMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
// Seed at the type's maximum so the first element always wins.
static _CUDA_HD inline X startingValue(X *input) {
return nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
// Keeps the smaller value; ties prefer the lower index on the CUDA path.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
// NOTE(review): returns f2 when f1.value is the SMALLER of the two — the
// opposite of update()'s preference; confirm whether this inversion is
// intentional before relying on merge().
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value < f2.value)
return f2;
return f1;
}
// Pass-through: the reduced IndexValue is already the final answer.
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X, typename Z>
// Summary-stats extractor: variance of the accumulated statistics record.
class SummaryStatsVariance {
public:
    // With bias correction requested, the corrected estimate is used unless it
    // comes out negative (numerically degenerate), in which case the plain
    // (biased) variance is returned instead.
    static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
        if (!biasCorrected)
            return static_cast<Z>(val.variance());

        const Z corrected = static_cast<Z>(val.varianceBiasCorrected());
        if (corrected < static_cast<Z>(0.0f))
            return static_cast<Z>(val.variance());

        return corrected;
    }

    // Pairwise op is the identity: combining is handled by SummaryStatsData.
    static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
        return d1;
    }
};
template <typename X, typename Z>
// Summary-stats extractor: standard deviation of the accumulated record.
class SummaryStatsStandardDeviation {
public:
    // Square root of the (optionally bias-corrected) variance; falls back to
    // the plain variance when the corrected estimate is negative.
    static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
        if (!biasCorrected)
            return nd4j::math::nd4j_sqrt<double, Z>(val.variance());

        auto corrected = static_cast<Z>(val.varianceBiasCorrected());
        if (corrected < static_cast<Z>(0.0f))
            return nd4j::math::nd4j_sqrt<double, Z>(val.variance());

        return nd4j::math::nd4j_sqrt<double, Z>(corrected);
    }

    // Pairwise op is the identity: combining is handled by SummaryStatsData.
    static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
        return d1;
    }
};
template <typename X>
// Transform op: standard dropout — zeroes d1 unless the random draw falls
// below the keep-probability params[0].
class DropOut {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    inline _CUDA_D static X op(X d1, X *params) {
        X prob = params[0];

#ifdef __CUDACC__
        X length = params[1];
        X tid = gridDim.x * blockDim.x + threadIdx.x;
        // NOTE(review): clock64()-based pseudo-random source — statistically
        // weak, but kept as-is.
        X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
        // BUGFIX: rand() / RAND_MAX was evaluated in integer arithmetic, which
        // yields 0 for every draw except rand() == RAND_MAX — so elements were
        // (almost) never dropped. Convert before dividing to get a uniform
        // draw in [0, 1].
        X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX);
#endif
        // Keep d1 only when the draw is below the keep-probability.
        return rnd >= prob ? static_cast<X>(0.0f) : d1;
    }
};
template <typename X, typename Y, typename Z>
// Pairwise op: inverted dropout — keeps d1 with probability d2 and rescales
// the kept values by 1/d2 so the expected activation is unchanged.
class DropOutInverted {
public:
    no_op_exec_special
    no_op_exec_special_cuda

#ifdef __CUDACC__
    __device__
#endif
    inline static Z op(X d1, Y d2, Z *params) {
        Y prob = d2;

#ifdef __CUDACC__
        X length = params[1];
        X tid = gridDim.x * blockDim.x + threadIdx.x;
        // NOTE(review): clock64()-based pseudo-random source — statistically
        // weak, but kept as-is.
        X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
        // BUGFIX: rand() / RAND_MAX was integer division (0 for every draw
        // except rand() == RAND_MAX), so values were essentially never dropped.
        X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX);
#endif
        // BUGFIX: the kept branch used reinterpret_cast<Z>(...) on an
        // arithmetic value; a value conversion (static_cast) is what is meant.
        return rnd >= static_cast<X>(prob) ? static_cast<Z>(0.0f) : static_cast<Z>(d1 / static_cast<X>(prob));
    }
};
template <typename X, typename Y, typename Z>
// Pairwise op: substitutes d2 wherever d1 is NaN; the result is widened to Z.
class ReplaceNans {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        if (nd4j::math::nd4j_isnan(d1))
            return static_cast<Z>(d2);
        return static_cast<Z>(d1);
    }
};
// this op is used for conditional pairwise transforms only
template <typename X, typename Y, typename Z>
class CompareAndReplace{
public:
    // op definition for PairWise Transform
    //
    // Replaces zd1 with zd2 whenever zd1 satisfies the predicate selected by
    // params[3]; otherwise zd1 passes through unchanged.
    //   params[0] = compare value, params[2] = eps, params[3] = mode
    op_def static Z op(X d1, Y d2, Z *params) {
        auto zd1 = static_cast<Z>(d1);
        auto zd2 = static_cast<Z>(d2);
        auto compare = params[0];
        auto eps = params[2];
        int mode = (int) params[3];

        switch (mode) {
            case 0:  // equals (within eps)
                return nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps ? zd2 : zd1;
            case 1:  // not equals eps
                return nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps ? zd2 : zd1;
            case 2:  // less_than eps
                return zd1 < compare ? zd2 : zd1;
            case 3:  // greater_than
                return zd1 > compare ? zd2 : zd1;
            case 4:  // less_or_equals_than
                return zd1 <= compare ? zd2 : zd1;
            case 5:  // greater_or_equals_than
                return zd1 >= compare ? zd2 : zd1;
            case 6:  // abs_less_than
                return nd4j::math::nd4j_abs<Z>(zd1) < compare ? zd2 : zd1;
            case 7:  // abs_greater_than
                return nd4j::math::nd4j_abs<Z>(zd1) > compare ? zd2 : zd1;
            case 8:  // is inf
                return nd4j::math::nd4j_isinf(zd1) ? zd2 : zd1;
            case 9:  // is nan
                return nd4j::math::nd4j_isnan(zd1) ? zd2 : zd1;
            case 10: // equals (exact)
                return zd1 == compare ? zd2 : zd1;
            case 11: // not equals (exact)
                return zd1 != compare ? zd2 : zd1;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<Z>(zd1) >= compare ? zd2 : zd1;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<Z>(zd1) <= compare ? zd2 : zd1;
            default:
                printf("Undefined boolean operation: [%i]\n", mode);
                return zd1;
        }
    }
};
template <typename X, typename Y, typename Z>
class CompareAndSet {
public:
    // op definition for PairWise Transform
    //
    // Keeps d1 unless the candidate d2 satisfies the predicate selected by
    // params[3], in which case d2 replaces it.
    //   params[0] = compare value, params[2] = eps, params[3] = mode
    op_def static Z op(X dX, Y dY, Z *params) {
        auto d1 = static_cast<Z>(dX);
        auto d2 = static_cast<Z>(dY);
        auto compare = params[0];
        auto eps = params[2];
        auto mode = static_cast<int>(params[3]);

        switch (mode) {
            case 0:  // equals (within eps)
                return nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps ? d2 : d1;
            case 1:  // not equals (within eps)
                return nd4j::math::nd4j_abs<Z>(d2 - compare) > eps ? d2 : d1;
            case 2:  // less_than
                return d2 < compare ? d2 : d1;
            case 3:  // greater_than
                return d2 > compare ? d2 : d1;
            case 4:  // less_or_equals_than
                return d2 <= compare ? d2 : d1;
            case 5:  // greater_or_equals_than
                return d2 >= compare ? d2 : d1;
            case 6:  // abs_less_than
                return nd4j::math::nd4j_abs<Z>(d2) < compare ? d2 : d1;
            case 7:  // abs_greater_than
                return nd4j::math::nd4j_abs<Z>(d2) > compare ? d2 : d1;
            case 8:  // is inf
                return nd4j::math::nd4j_isinf(d2) ? d2 : d1;
            case 9:  // is nan
                return nd4j::math::nd4j_isnan(d2) ? d2 : d1;
            case 10: // equals (exact)
                return d2 == compare ? d2 : d1;
            case 11: // not equals (exact)
                return d2 != compare ? d2 : d1;
            case 12: // abs_greater_or_equals_than
                // BUGFIX: modes 12/13 tested d1 while every other mode of this
                // op tests the candidate d2; aligned with the rest of the op.
                return nd4j::math::nd4j_abs<Z>(d2) >= compare ? d2 : d1;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<Z>(d2) <= compare ? d2 : d1;
            default:
                printf("Undefined boolean operation: [%i]\n", mode);
                return d1;
        }
    }
};
template <typename X>
class CompareAndSetTransform {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // op definition for Transform
    //
    // Returns params[1] ("set") when d1 satisfies the predicate selected by
    // params[3] against params[0] ("compare", with params[2] as eps);
    // otherwise d1 is returned unchanged.
    // with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise
    op_def static X op(X d1, X *params) {
        auto compare = params[0];
        auto set = params[1];
        auto eps = params[2];
        int mode = (int) params[3];

        switch (mode) {
            case 0:  // equals (within eps)
                return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? set : d1;
            case 1:  // not equals (within eps)
                return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? set : d1;
            case 2:  // less_than
                return d1 < compare ? set : d1;
            case 3:  // greater_than
                return d1 > compare ? set : d1;
            case 4:  // less_or_equals_than
                return d1 <= compare ? set : d1;
            case 5:  // greater_or_equals_than
                return d1 >= compare ? set : d1;
            case 6:  // abs_less_than
                return nd4j::math::nd4j_abs<X>(d1) < compare ? set : d1;
            case 7:  // abs_greater_than
                return nd4j::math::nd4j_abs<X>(d1) > compare ? set : d1;
            case 8:  // is inf
                return nd4j::math::nd4j_isinf(d1) ? set : d1;
            case 9:  // is nan
                return nd4j::math::nd4j_isnan(d1) ? set : d1;
            case 10: // equals (exact)
                return d1 == compare ? set : d1;
            case 11: // not equals (exact)
                return d1 != compare ? set : d1;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) >= compare ? set : d1;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) <= compare ? set : d1;
            default:
                printf("Undefined boolean operation: [%i]\n", mode);
                return d1;
        }
    }
};
}
#endif
|
sum.c | #include <stdio.h>
#include <stdlib.h>
int main()
{
    int n = 40000000;
    //int n = 400000;

    /* BUGFIX: the malloc result was unchecked — a 320 MB allocation can fail,
     * after which the fill loop would dereference NULL. */
    double* vector = (double*) malloc(n * sizeof(double));
    if (vector == NULL)
    {
        fprintf(stderr, "allocation of %d doubles failed\n", n);
        return 1;
    }

    double sum = 0;

    /* Fill vector[x] = x; iterations are independent, so parallel+simd is safe. */
    #pragma omp parallel for simd
    for(int x = 0; x < n; ++x)
        vector[x] = x;

    /* Parallel sum reduction over the vector. */
    #pragma omp parallel for simd reduction(+: sum) schedule(static, 100000)
    for(int y = 0; y < n; ++y)
        sum += vector[y];
    //printf("%lf", sum);

    free(vector);
    vector = NULL;
    return 0;
}
|
test2.c | int main() {
// Test fixture for an OpenMP compiler/analysis pass: the bare integer
// expression statements (0;, 2;, ...) act as numbered program points,
// presumably so a harness can check barrier placement across branches —
// confirm against the tool that consumes this file.
int x;
#pragma omp parallel
{
// NOTE(review): unsynchronized write to shared 'x' — a data race when the
// team has more than one thread; apparently intentional for the test.
x = 0;
0;
if (1) {
2;
// NOTE(review): a barrier inside a branch not reached by all threads is
// non-conforming OpenMP — this looks like exactly what the fixture probes.
#pragma omp barrier
x;
3;
} else {
4;
while (5) {
6;
#pragma omp barrier
7;
}
8;
}
9;
#pragma omp barrier
10;
}
}
|
6812.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose
// Auto-generated (CHILL) tiled variant of the PolyBench fdtd-2d kernel:
// an outer time loop, with each field sweep tiled 16 rows x 32 columns and
// the row-tile loop parallelized with OpenMP.
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
int t10;
int t8;
int t6;
int t4;
int t2;
for (t2 = 0; t2 <= tmax - 1; t2 += 1) {
// Boundary condition: row 0 of ey is driven by the source array _fict_[t].
for (t4 = 0; t4 <= ny - 1; t4 += 1)
ey[0][t4] = _fict_[t2];
// ey update (rows 1..nx-1): vertical difference of hz.
#pragma omp parallel for
for (t4 = 1; t4 <= nx - 1; t4 += 16)
for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1)
for (t8 = 0; t8 <= ny - 1; t8 += 32)
for (t10 = t8; t10 <= (ny - 1 < t8 + 31 ? ny - 1 : t8 + 31); t10 += 1)
ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]);
// ex update (columns 1..ny-1): horizontal difference of hz.
#pragma omp parallel for
for (t4 = 0; t4 <= nx - 1; t4 += 16)
for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1)
for (t8 = 1; t8 <= ny - 1; t8 += 32)
for (t10 = t8; t10 <= (ny - 1 < t8 + 31 ? ny - 1 : t8 + 31); t10 += 1)
ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]);
// hz update over the interior, combining ex and ey differences.
#pragma omp parallel for
for (t4 = 0; t4 <= nx - 2; t4 += 16)
for (t6 = t4; t6 <= (t4 + 15 < nx - 2 ? t4 + 15 : nx - 2); t6 += 1)
for (t8 = 0; t8 <= ny - 2; t8 += 32)
for (t10 = t8; t10 <= (ny - 2 < t8 + 31 ? ny - 2 : t8 + 31); t10 += 1)
hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]);
}
}
|
par_vector.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "_hypre_parcsr_mv.h"
HYPRE_Int hypre_FillResponseParToVectorAll(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**,
HYPRE_Int*);
/*--------------------------------------------------------------------------
* hypre_ParVectorCreate
*--------------------------------------------------------------------------*/
/* If create is called and partitioning is NOT null, then it is assumed that it
is array of length 2 containing the start row of the calling processor
followed by the start row of the next processor - AHB 6/05 */
hypre_ParVector *
hypre_ParVectorCreate( MPI_Comm comm,
HYPRE_BigInt global_size,
HYPRE_BigInt *partitioning_in )
{
hypre_ParVector *vector;
HYPRE_Int num_procs, my_id, local_size;
HYPRE_BigInt partitioning[2];
/* Reject negative global sizes up front. */
if (global_size < 0)
{
hypre_error_in_arg(2);
return NULL;
}
vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
hypre_MPI_Comm_rank(comm, &my_id);
/* No partitioning supplied: compute this rank's [start, next-start) rows. */
if (!partitioning_in)
{
hypre_MPI_Comm_size(comm, &num_procs);
hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, partitioning);
}
else
{
partitioning[0] = partitioning_in[0];
partitioning[1] = partitioning_in[1];
}
/* Locally owned rows = next rank's first row minus this rank's first row. */
local_size = (HYPRE_Int) (partitioning[1] - partitioning[0]);
hypre_ParVectorAssumedPartition(vector) = NULL;
hypre_ParVectorComm(vector) = comm;
hypre_ParVectorGlobalSize(vector) = global_size;
hypre_ParVectorPartitioning(vector)[0] = partitioning[0];
hypre_ParVectorPartitioning(vector)[1] = partitioning[1];
hypre_ParVectorFirstIndex(vector) = hypre_ParVectorPartitioning(vector)[0];
hypre_ParVectorLastIndex(vector) = hypre_ParVectorPartitioning(vector)[1] - 1;
/* Local storage object is created here; its data is allocated later by
* hypre_ParVectorInitialize(). */
hypre_ParVectorLocalVector(vector) = hypre_SeqVectorCreate(local_size);
/* set defaults */
hypre_ParVectorOwnsData(vector) = 1;
hypre_ParVectorActualLocalSize(vector) = 0;
return vector;
}
/*--------------------------------------------------------------------------
* hypre_ParMultiVectorCreate
*--------------------------------------------------------------------------*/
hypre_ParVector *
hypre_ParMultiVectorCreate( MPI_Comm comm,
                            HYPRE_BigInt global_size,
                            HYPRE_BigInt *partitioning,
                            HYPRE_Int num_vectors )
{
   /* note that global_size is the global length of a single vector */
   hypre_ParVector *vector = hypre_ParVectorCreate( comm, global_size, partitioning );

   /* BUGFIX: hypre_ParVectorCreate returns NULL for invalid sizes; the
    * NumVectors assignment below would then dereference NULL. */
   if (!vector)
   {
      return NULL;
   }

   hypre_ParVectorNumVectors(vector) = num_vectors;

   return vector;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorDestroy( hypre_ParVector *vector )
{
   /* Destroying a NULL vector is a harmless no-op. */
   if (!vector)
   {
      return hypre_error_flag;
   }

   /* Free the local data only when this ParVector owns it. */
   if (hypre_ParVectorOwnsData(vector))
   {
      hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(vector));
   }

   if (hypre_ParVectorAssumedPartition(vector))
   {
      hypre_AssumedPartitionDestroy(hypre_ParVectorAssumedPartition(vector));
   }

   hypre_TFree(vector, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
/* Allocates the local vector's data in the requested memory location. */
hypre_ParVectorInitialize_v2( hypre_ParVector *vector, HYPRE_MemoryLocation memory_location )
{
if (!vector)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_SeqVectorInitialize_v2(hypre_ParVectorLocalVector(vector), memory_location);
/* Record the size actually allocated for the local block. */
hypre_ParVectorActualLocalSize(vector) = hypre_VectorSize(hypre_ParVectorLocalVector(vector));
return hypre_error_flag;
}
HYPRE_Int
/* Convenience overload: allocate in the vector's current memory location.
* NOTE(review): a NULL vector is dereferenced by
* hypre_ParVectorMemoryLocation() before _v2's NULL check runs — confirm
* callers never pass NULL here. */
hypre_ParVectorInitialize( hypre_ParVector *vector )
{
return hypre_ParVectorInitialize_v2(vector, hypre_ParVectorMemoryLocation(vector));
}
/*--------------------------------------------------------------------------
* hypre_ParVectorSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorSetDataOwner( hypre_ParVector *vector,
                             HYPRE_Int owns_data )
{
   /* Flags whether this ParVector is responsible for freeing its local data. */
   if (vector)
   {
      hypre_ParVectorOwnsData(vector) = owns_data;
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorSetNumVectors
* call before calling hypre_ParVectorInitialize
* probably this will do more harm than good, use hypre_ParMultiVectorCreate
*--------------------------------------------------------------------------*/
#if 0
HYPRE_Int
hypre_ParVectorSetNumVectors( hypre_ParVector *vector,
                              HYPRE_Int num_vectors )
{
   HYPRE_Int ierr = 0;
   /* BUGFIX: the local vector was fetched from an undeclared identifier 'v'
    * instead of the 'vector' parameter; because this block is compiled out
    * (#if 0) the error went unnoticed. */
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(vector);

   hypre_SeqVectorSetNumVectors( local_vector, num_vectors );

   return ierr;
}
#endif
/*--------------------------------------------------------------------------
* hypre_ParVectorRead
*--------------------------------------------------------------------------*/
hypre_ParVector*
hypre_ParVectorRead( MPI_Comm comm,
                     const char *file_name )
{
   char new_file_name[80];
   hypre_ParVector *par_vector;
   HYPRE_Int my_id;
   HYPRE_BigInt partitioning[2];
   HYPRE_BigInt global_size;
   FILE *fp;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* Per-rank metadata file "<file_name>.INFO.<rank>" holds the global size
    * and this rank's [start, next-start) partitioning. */
   hypre_sprintf(new_file_name, "%s.INFO.%d", file_name, my_id);
   fp = fopen(new_file_name, "r");

   /* BUGFIX: fopen() was unchecked, so a missing or unreadable INFO file led
    * to fscanf on a NULL stream (undefined behavior). */
   if (!fp)
   {
      hypre_error_in_arg(2);
      return NULL;
   }

   hypre_fscanf(fp, "%b\n", &global_size);
   hypre_fscanf(fp, "%b\n", &partitioning[0]);
   hypre_fscanf(fp, "%b\n", &partitioning[1]);
   fclose (fp);

   par_vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);

   hypre_ParVectorComm(par_vector) = comm;
   hypre_ParVectorGlobalSize(par_vector) = global_size;
   hypre_ParVectorFirstIndex(par_vector) = partitioning[0];
   hypre_ParVectorLastIndex(par_vector) = partitioning[1] - 1;
   hypre_ParVectorPartitioning(par_vector)[0] = partitioning[0];
   hypre_ParVectorPartitioning(par_vector)[1] = partitioning[1];
   hypre_ParVectorOwnsData(par_vector) = 1;

   /* The local coefficients live in "<file_name>.<rank>". */
   hypre_sprintf(new_file_name, "%s.%d", file_name, my_id);
   hypre_ParVectorLocalVector(par_vector) = hypre_SeqVectorRead(new_file_name);

   /* multivector code not written yet */
   hypre_assert( hypre_ParVectorNumVectors(par_vector) == 1 );

   return par_vector;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorPrint
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorPrint( hypre_ParVector *vector,
                      const char *file_name )
{
   char new_file_name[80];
   hypre_Vector *local_vector;
   MPI_Comm comm;
   HYPRE_Int my_id;
   HYPRE_BigInt *partitioning;
   HYPRE_BigInt global_size;
   FILE *fp;

   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   local_vector = hypre_ParVectorLocalVector(vector);
   comm = hypre_ParVectorComm(vector);
   partitioning = hypre_ParVectorPartitioning(vector);
   global_size = hypre_ParVectorGlobalSize(vector);

   hypre_MPI_Comm_rank(comm, &my_id);

   /* Local coefficients go to "<file_name>.<rank>"... */
   hypre_sprintf(new_file_name, "%s.%d", file_name, my_id);
   hypre_SeqVectorPrint(local_vector, new_file_name);

   /* ...and the metadata (global size + partitioning) to
    * "<file_name>.INFO.<rank>", mirroring hypre_ParVectorRead. */
   hypre_sprintf(new_file_name, "%s.INFO.%d", file_name, my_id);
   fp = fopen(new_file_name, "w");

   /* BUGFIX: fopen() was unchecked; an unwritable path led to fprintf on a
    * NULL stream (undefined behavior). */
   if (!fp)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   hypre_fprintf(fp, "%b\n", global_size);
   hypre_fprintf(fp, "%b\n", partitioning[0]);
   hypre_fprintf(fp, "%b\n", partitioning[1]);
   fclose(fp);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorSetConstantValues
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorSetConstantValues( hypre_ParVector *v,
                                  HYPRE_Complex value )
{
   /* Constant fill is purely local — no communication required. */
   return hypre_SeqVectorSetConstantValues(hypre_ParVectorLocalVector(v), value);
}
/*--------------------------------------------------------------------------
* hypre_ParVectorSetRandomValues
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorSetRandomValues( hypre_ParVector *v,
                                HYPRE_Int seed )
{
   HYPRE_Int my_id;

   /* Decorrelate the random streams across ranks by scaling the seed with
    * (rank + 1). */
   hypre_MPI_Comm_rank(hypre_ParVectorComm(v), &my_id);

   return hypre_SeqVectorSetRandomValues(hypre_ParVectorLocalVector(v),
                                         seed * (my_id + 1));
}
/*--------------------------------------------------------------------------
* hypre_ParVectorCopy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorCopy( hypre_ParVector *x,
                     hypre_ParVector *y )
{
   /* Purely local copy; x and y presumably share the same partitioning —
    * no check is performed here. */
   return hypre_SeqVectorCopy(hypre_ParVectorLocalVector(x),
                              hypre_ParVectorLocalVector(y));
}
/*--------------------------------------------------------------------------
* hypre_ParVectorCloneShallow
* returns a complete copy of a hypre_ParVector x - a shallow copy, re-using
* the partitioning and data arrays of x
*--------------------------------------------------------------------------*/
hypre_ParVector *
hypre_ParVectorCloneShallow( hypre_ParVector *x )
{
hypre_ParVector * y =
hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x),
hypre_ParVectorPartitioning(x));
hypre_ParVectorOwnsData(y) = 1;
/* ...This vector owns its local vector, although the local vector doesn't
* own _its_ data */
/* Replace the freshly created local vector with a shallow clone that
* aliases x's data. */
hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) );
hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneShallow(hypre_ParVectorLocalVector(x) );
hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x);
return y;
}
hypre_ParVector *
/* Deep copy of x whose local data is allocated in memory_location. */
hypre_ParVectorCloneDeep_v2( hypre_ParVector *x, HYPRE_MemoryLocation memory_location )
{
hypre_ParVector *y =
hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x),
hypre_ParVectorPartitioning(x));
hypre_ParVectorOwnsData(y) = 1;
/* Replace the freshly created local vector with a deep clone of x's data. */
hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) );
hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneDeep_v2( hypre_ParVectorLocalVector(x),
memory_location );
hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x); //RL: WHY HERE?
return y;
}
HYPRE_Int
/* Moves x's local data to the given memory location (no-op for NULL x). */
hypre_ParVectorMigrate(hypre_ParVector *x, HYPRE_MemoryLocation memory_location)
{
if (!x)
{
return hypre_error_flag;
}
/* Copy only when the resolved (actual) locations differ. */
if ( hypre_GetActualMemLocation(memory_location) !=
hypre_GetActualMemLocation(hypre_ParVectorMemoryLocation(x)) )
{
/* Deep-copy into the target location, then swap the new local vector in. */
hypre_Vector *x_local = hypre_SeqVectorCloneDeep_v2(hypre_ParVectorLocalVector(x), memory_location);
hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(x));
hypre_ParVectorLocalVector(x) = x_local;
}
else
{
/* Same physical memory: just relabel the location tag. */
hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(x)) = memory_location;
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorScale
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorScale( HYPRE_Complex alpha,
                      hypre_ParVector *y )
{
   /* y <- alpha * y, applied to the locally owned block only. */
   return hypre_SeqVectorScale(alpha, hypre_ParVectorLocalVector(y));
}
/*--------------------------------------------------------------------------
* hypre_ParVectorAxpy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorAxpy( HYPRE_Complex alpha,
                     hypre_ParVector *x,
                     hypre_ParVector *y )
{
   /* y <- alpha * x + y on the local blocks; no communication required. */
   return hypre_SeqVectorAxpy(alpha,
                              hypre_ParVectorLocalVector(x),
                              hypre_ParVectorLocalVector(y));
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInnerProd
*--------------------------------------------------------------------------*/
HYPRE_Real
/* Global inner product <x, y>: local dot product followed by an all-reduce
* sum over the vector's communicator. */
hypre_ParVectorInnerProd( hypre_ParVector *x,
hypre_ParVector *y )
{
MPI_Comm comm = hypre_ParVectorComm(x);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
HYPRE_Real result = 0.0;
HYPRE_Real local_result = hypre_SeqVectorInnerProd(x_local, y_local);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime();
#endif
hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL,
hypre_MPI_SUM, comm);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime();
#endif
return result;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorElmdivpy
 * y = y + x ./ b [MATLAB Notation]
 * Elementwise update, delegated to the sequential kernel.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorElmdivpy( hypre_ParVector *x,
                         hypre_ParVector *b,
                         hypre_ParVector *y )
{
   return hypre_SeqVectorElmdivpy(hypre_ParVectorLocalVector(x),
                                  hypre_ParVectorLocalVector(b),
                                  hypre_ParVectorLocalVector(y));
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorElmdivpyMarked
 * y[i] += x[i] / b[i] where marker[i] == marker_val
 * Elementwise masked update, delegated to the sequential kernel.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorElmdivpyMarked( hypre_ParVector *x,
                               hypre_ParVector *b,
                               hypre_ParVector *y,
                               HYPRE_Int       *marker,
                               HYPRE_Int        marker_val )
{
   return hypre_SeqVectorElmdivpyMarked(hypre_ParVectorLocalVector(x),
                                        hypre_ParVectorLocalVector(b),
                                        hypre_ParVectorLocalVector(y),
                                        marker, marker_val);
}
/*--------------------------------------------------------------------------
 * hypre_VectorToParVector:
 * generates a ParVector from a Vector on proc 0 and distributes the pieces
 * to the other procs in comm.
 *
 * comm       - communicator over which the vector is distributed
 * v          - full (multi)vector, significant only on rank 0
 * vec_starts - desired partitioning (may be NULL; the created ParVector's
 *              own partitioning is used after creation)
 * Returns the newly created, filled ParVector on every rank.
 *--------------------------------------------------------------------------*/
hypre_ParVector *
hypre_VectorToParVector ( MPI_Comm      comm,
                          hypre_Vector *v,
                          HYPRE_BigInt *vec_starts )
{
   HYPRE_BigInt        global_size;
   HYPRE_BigInt       *global_vec_starts = NULL;
   HYPRE_BigInt        first_index;
   HYPRE_BigInt        last_index;
   HYPRE_Int           local_size;
   HYPRE_Int           num_vectors;
   HYPRE_Int           num_procs, my_id;
   HYPRE_Int           global_vecstride, vecstride, idxstride;
   hypre_ParVector    *par_vector;
   hypre_Vector       *local_vector;
   HYPRE_Complex      *v_data;
   HYPRE_Complex      *local_data;
   hypre_MPI_Request  *requests;
   hypre_MPI_Status   *status, status0;
   HYPRE_Int           i, j, k, p;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* Only rank 0 holds the source vector; pick up its metadata there. */
   if (my_id == 0)
   {
      global_size = (HYPRE_BigInt)hypre_VectorSize(v);
      v_data = hypre_VectorData(v);
      num_vectors = hypre_VectorNumVectors(v); /* for multivectors */
      global_vecstride = hypre_VectorVectorStride(v);
   }

   /* global_size is a HYPRE_BigInt: broadcast with the matching datatype
      (HYPRE_MPI_INT would truncate/corrupt it in mixed-int builds). */
   hypre_MPI_Bcast(&global_size, 1, HYPRE_MPI_BIG_INT, 0, comm);
   hypre_MPI_Bcast(&num_vectors, 1, HYPRE_MPI_INT, 0, comm);
   hypre_MPI_Bcast(&global_vecstride, 1, HYPRE_MPI_INT, 0, comm);

   if (num_vectors == 1)
   {
      par_vector = hypre_ParVectorCreate(comm, global_size, vec_starts);
   }
   else
   {
      par_vector = hypre_ParMultiVectorCreate(comm, global_size, vec_starts, num_vectors);
   }

   /* From here on, use the partitioning owned by the new ParVector. */
   vec_starts  = hypre_ParVectorPartitioning(par_vector);
   first_index = hypre_ParVectorFirstIndex(par_vector);
   last_index  = hypre_ParVectorLastIndex(par_vector);
   local_size  = (HYPRE_Int)(last_index - first_index) + 1;

   /* Rank 0 gathers every rank's first index to know where each piece starts. */
   if (my_id == 0)
   {
      global_vec_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs + 1, HYPRE_MEMORY_HOST);
   }
   hypre_MPI_Gather(&first_index, 1, HYPRE_MPI_BIG_INT, global_vec_starts,
                    1, HYPRE_MPI_BIG_INT, 0, comm);
   if (my_id == 0)
   {
      global_vec_starts[num_procs] = hypre_ParVectorGlobalSize(par_vector);
   }

   hypre_ParVectorInitialize(par_vector);
   local_vector = hypre_ParVectorLocalVector(par_vector);
   local_data   = hypre_VectorData(local_vector);
   vecstride    = hypre_VectorVectorStride(local_vector);
   idxstride    = hypre_VectorIndexStride(local_vector);
   /* so far the only implemented multivector StorageMethod is 0 */
   hypre_assert( idxstride == 1 );

   if (my_id == 0)
   {
      /* One Isend per (rank, component) pair: k counts the posted requests. */
      requests = hypre_CTAlloc(hypre_MPI_Request, num_vectors * (num_procs - 1), HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_vectors * (num_procs - 1), HYPRE_MEMORY_HOST);
      k = 0;
      for (p = 1; p < num_procs; p++)
         for (j = 0; j < num_vectors; ++j)
         {
            hypre_MPI_Isend( &v_data[(HYPRE_Int) global_vec_starts[p]] + j * global_vecstride,
                             (HYPRE_Int)(global_vec_starts[p + 1] - global_vec_starts[p]),
                             HYPRE_MPI_COMPLEX, p, 0, comm, &requests[k++] );
         }
      /* Rank 0 keeps its own piece by direct copy. */
      if (num_vectors == 1)
      {
         for (i = 0; i < local_size; i++)
         {
            local_data[i] = v_data[i];
         }
      }
      else
      {
         for (j = 0; j < num_vectors; ++j)
         {
            for (i = 0; i < local_size; i++)
            {
               local_data[i + j * vecstride] = v_data[i + j * global_vecstride];
            }
         }
      }
      /* Wait on all k posted sends (num_vectors*(num_procs-1) of them);
         waiting on only num_procs-1 would leak pending requests whenever
         num_vectors > 1. */
      hypre_MPI_Waitall(k, requests, status);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* Receive one contiguous chunk per component. */
      for ( j = 0; j < num_vectors; ++j )
         hypre_MPI_Recv( local_data + j * vecstride, local_size, HYPRE_MPI_COMPLEX,
                         0, 0, comm, &status0 );
   }

   if (global_vec_starts)
   {
      hypre_TFree(global_vec_starts, HYPRE_MEMORY_HOST);
   }
   return par_vector;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorToVectorAll:
 * generates a Vector on every proc which has a piece of the data
 * from a ParVector on several procs in comm,
 * vec_starts needs to contain the partitioning across all procs in comm
 *
 * Works in three phases:
 *   1) every rank that owns rows contacts rank 0 with its last global index;
 *   2) rank 0 assembles the list of owning ranks and their vector starts and
 *      sends it (tag1) to each owner;
 *   3) all owners exchange their local pieces all-to-all (tag2) so each ends
 *      up holding the full vector.
 * Ranks that own no rows clean up and return NULL.
 *--------------------------------------------------------------------------*/
hypre_Vector *
hypre_ParVectorToVectorAll( hypre_ParVector *par_v )
{
   MPI_Comm comm = hypre_ParVectorComm(par_v);
   HYPRE_BigInt global_size = hypre_ParVectorGlobalSize(par_v);
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_v);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int num_vectors = hypre_ParVectorNumVectors(par_v);
   hypre_Vector *vector;
   HYPRE_Complex *vector_data;
   HYPRE_Complex *local_data;
   HYPRE_Int local_size;
   hypre_MPI_Request *requests;
   hypre_MPI_Status *status;
   HYPRE_Int i, j;
   HYPRE_Int *used_procs;
   HYPRE_Int num_types, num_requests;
   HYPRE_Int vec_len, proc_id;
   HYPRE_Int *new_vec_starts;
   HYPRE_Int num_contacts;
   HYPRE_Int contact_proc_list[1];
   HYPRE_Int contact_send_buf[1];
   HYPRE_Int contact_send_buf_starts[2];
   HYPRE_Int max_response_size;
   HYPRE_Int *response_recv_buf = NULL;
   HYPRE_Int *response_recv_buf_starts = NULL;
   hypre_DataExchangeResponse response_obj;
   hypre_ProcListElements send_proc_obj;
   HYPRE_Int *send_info = NULL;
   hypre_MPI_Status status1;
   HYPRE_Int count, tag1 = 112, tag2 = 223;
   HYPRE_Int start;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   local_size = (HYPRE_Int)(hypre_ParVectorLastIndex(par_v) -
                            hypre_ParVectorFirstIndex(par_v) + 1);
   /* determine procs which hold data of par_v and store ids in used_procs */
   /* we need to do an exchange data for this. If I own row then I will contact
      processor 0 with the endpoint of my local range */
   /* NOTE(review): contact_send_buf is HYPRE_Int but LastIndex is HYPRE_BigInt,
      and the fill-response callback casts the received buffer to HYPRE_BigInt* -
      looks like a width mismatch in mixed-int builds; confirm against
      hypre_DataExchangeList's element-size handling. */
   if (local_size > 0)
   {
      num_contacts = 1;
      contact_proc_list[0] = 0;
      contact_send_buf[0] = hypre_ParVectorLastIndex(par_v);
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 1;
   }
   else
   {
      num_contacts = 0;
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 0;
   }
   /*build the response object*/
   /*send_proc_obj will be for saving info from contacts */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = 10;
   send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = 10;
   send_proc_obj.elements =
      hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
   max_response_size = 0; /* each response is null */
   response_obj.fill_response = hypre_FillResponseParToVectorAll;
   response_obj.data1 = NULL;
   response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/
   hypre_DataExchangeList(num_contacts,
                          contact_proc_list, contact_send_buf,
                          contact_send_buf_starts, sizeof(HYPRE_Int),
                          //0, &response_obj,
                          sizeof(HYPRE_Int), &response_obj,
                          max_response_size, 1,
                          comm, (void**) &response_recv_buf,
                          &response_recv_buf_starts);
   /* now processor 0 should have a list of ranges for processors that have rows -
      these are in send_proc_obj - it needs to create the new list of processors
      and also an array of vec starts - and send to those who own row*/
   if (my_id)
   {
      if (local_size)
      {
         /* look for a message from processor 0 */
         hypre_MPI_Probe(0, tag1, comm, &status1);
         hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);
         send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
         hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);
         /* now unpack: send_info = [num_types, owner ids..., vec starts...] */
         num_types = send_info[0];
         used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
         new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST);
         for (i = 1; i <= num_types; i++)
         {
            used_procs[i - 1] = (HYPRE_Int)send_info[i];
         }
         for (i = num_types + 1; i < count; i++)
         {
            new_vec_starts[i - num_types - 1] = send_info[i] ;
         }
      }
      else /* clean up and exit */
      {
         hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
         hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
         hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
         if (response_recv_buf) { hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); }
         if (response_recv_buf_starts) { hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); }
         return NULL;
      }
   }
   else /* my_id ==0 */
   {
      /* build the owner list and starts array from the contacts collected
         by hypre_FillResponseParToVectorAll */
      num_types = send_proc_obj.length;
      used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
      new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST);
      new_vec_starts[0] = 0;
      for (i = 0; i < num_types; i++)
      {
         used_procs[i] = send_proc_obj.id[i];
         /* elements[i] holds the owner's last global index; +1 turns it
            into an exclusive range end */
         new_vec_starts[i + 1] = send_proc_obj.elements[i] + 1;
      }
      /* contacts arrived in arbitrary order: sort both arrays */
      hypre_qsort0(used_procs, 0, num_types - 1);
      hypre_qsort0(new_vec_starts, 0, num_types);
      /*now we need to put into an array to send */
      count = 2 * num_types + 2;
      send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
      send_info[0] = num_types;
      for (i = 1; i <= num_types; i++)
      {
         send_info[i] = (HYPRE_Int)used_procs[i - 1];
      }
      for (i = num_types + 1; i < count; i++)
      {
         send_info[i] = new_vec_starts[i - num_types - 1];
      }
      requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST);
      /* don't send to myself - these are sorted so my id would be first*/
      start = 0;
      if (used_procs[0] == 0)
      {
         start = 1;
      }
      for (i = start; i < num_types; i++)
      {
         hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i],
                         tag1, comm, &requests[i - start]);
      }
      hypre_MPI_Waitall(num_types - start, requests, status);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
   }
   /* clean up */
   hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
   hypre_TFree(send_info, HYPRE_MEMORY_HOST);
   if (response_recv_buf) { hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); }
   if (response_recv_buf_starts) { hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); }
   /* now proc 0 can exit if it has no rows */
   if (!local_size)
   {
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
      hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
      return NULL;
   }
   /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */
   /* this vector should be rather small */
   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate((HYPRE_Int)global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);
   num_requests = 2 * num_types;
   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
   /* initialize data exchange among used_procs and generate vector - here we
      send to ourself also*/
   /* NOTE(review): for num_vectors > 1 the recv offset is new_vec_starts[i]
      while the count is num_vectors * vec_len - confirm the intended
      multivector layout before relying on this path. */
   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = (HYPRE_Int)(new_vec_starts[i + 1] - new_vec_starts[i]);
      hypre_MPI_Irecv(&vector_data[(HYPRE_Int)new_vec_starts[i]], num_vectors * vec_len,
                      HYPRE_MPI_COMPLEX, proc_id, tag2, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_data, num_vectors * local_size, HYPRE_MPI_COMPLEX,
                      used_procs[i], tag2, comm, &requests[j++]);
   }
   hypre_MPI_Waitall(num_requests, requests, status);
   if (num_requests)
   {
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
   return vector;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorPrintIJ
 * Writes one file per rank ("<filename>.<rank>") containing the global
 * size, the rank's [begin, end) ownership range (shifted by base_j), and
 * one "global_index value" line per locally owned entry.  The format must
 * stay in sync with hypre_ParVectorReadIJ.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorPrintIJ( hypre_ParVector *vector,
                        HYPRE_Int base_j,
                        const char *filename )
{
   MPI_Comm comm;
   HYPRE_BigInt global_size, j;
   HYPRE_BigInt *partitioning;
   HYPRE_Complex *local_data;
   /* part0 holds partitioning[0], a HYPRE_BigInt: declaring it HYPRE_Int
      (as before) truncated it in big-integer builds */
   HYPRE_BigInt part0;
   HYPRE_Int myid, num_procs, i;
   char new_filename[255];
   FILE *file;
   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm = hypre_ParVectorComm(vector);
   global_size = hypre_ParVectorGlobalSize(vector);
   partitioning = hypre_ParVectorPartitioning(vector);
   /* multivector code not written yet */
   hypre_assert( hypre_ParVectorNumVectors(vector) == 1 );
   if ( hypre_ParVectorNumVectors(vector) != 1 ) { hypre_error_in_arg(1); }
   hypre_MPI_Comm_rank(comm, &myid);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_sprintf(new_filename, "%s.%05d", filename, myid);
   if ((file = fopen(new_filename, "w")) == NULL)
   {
      /* hypre_error_w_msg does no printf-style substitution, so the message
         must not carry a dangling %s */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open output file\n");
      return hypre_error_flag;
   }
   local_data = hypre_VectorData(hypre_ParVectorLocalVector(vector));
   /* header: global size, then the two partitioning endpoints */
   hypre_fprintf(file, "%b \n", global_size);
   for (i = 0; i < 2; i++)
   {
      hypre_fprintf(file, "%b ", partitioning[i] + base_j);
   }
   hypre_fprintf(file, "\n");
   part0 = partitioning[0];
   for (j = part0; j < partitioning[1]; j++)
   {
      hypre_fprintf(file, "%b %.14e\n", j + base_j, local_data[(HYPRE_Int)(j - part0)]);
   }
   fclose(file);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorReadIJ
 * Reads back the per-rank files produced by hypre_ParVectorPrintIJ
 * ("<filename>.<rank>").  Warning: wrong base for assumed partition
 * if base > 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorReadIJ( MPI_Comm comm,
                       const char *filename,
                       HYPRE_Int *base_j_ptr,
                       hypre_ParVector **vector_ptr )
{
   HYPRE_BigInt global_size, J;
   hypre_ParVector *vector;
   hypre_Vector *local_vector;
   HYPRE_Complex *local_data;
   HYPRE_BigInt partitioning[2];
   HYPRE_Int base_j;
   HYPRE_Int myid, num_procs, i, j;
   char new_filename[255];
   FILE *file;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &myid);
   hypre_sprintf(new_filename, "%s.%05d", filename, myid);
   if ((file = fopen(new_filename, "r")) == NULL)
   {
      /* this is a read: report the *input* file, not "output" */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open input file %s\n");
      return hypre_error_flag;
   }
   /* Header must mirror hypre_ParVectorPrintIJ: the global size followed by
      exactly two partitioning entries.  (A previous extra fscanf here
      consumed a third token and shifted every subsequent read by one.) */
   hypre_fscanf(file, "%b", &global_size);
   /* this may need to be changed so that the base is available in the file! */
   for (i = 0; i < 2; i++)
   {
      hypre_fscanf(file, "%b", partitioning + i);
   }
   /* This is not yet implemented correctly! */
   base_j = 0;
   vector = hypre_ParVectorCreate(comm, global_size,
                                  partitioning);
   hypre_ParVectorInitialize(vector);
   local_vector = hypre_ParVectorLocalVector(vector);
   local_data = hypre_VectorData(local_vector);
   /* one "global_index value" pair per locally owned entry */
   for (j = 0; j < (HYPRE_Int)(partitioning[1] - partitioning[0]); j++)
   {
      hypre_fscanf(file, "%b %le", &J, local_data + j);
   }
   fclose(file);
   *base_j_ptr = base_j;
   *vector_ptr = vector;
   /* multivector code not written yet */
   hypre_assert( hypre_ParVectorNumVectors(vector) == 1 );
   if ( hypre_ParVectorNumVectors(vector) != 1 ) { hypre_error(HYPRE_ERROR_GENERIC); }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------
 * hypre_FillResponseParToVectorAll
 * Fill response function for determining the send processors
 * data exchange
 *
 * Called by hypre_DataExchangeList on the contacted rank for each
 * incoming contact.  Appends the contacting proc's id and its payload
 * (its last global index) to the hypre_ProcListElements stashed in
 * response_obj->data2, growing the id/vec_starts/elements arrays on
 * demand.  No reply payload is produced (*response_message_size = 0).
 *--------------------------------------------------------------------*/
HYPRE_Int
hypre_FillResponseParToVectorAll( void *p_recv_contact_buf,
                                  HYPRE_Int contact_size,
                                  HYPRE_Int contact_proc,
                                  void *ro,
                                  MPI_Comm comm,
                                  void **p_send_response_buf,
                                  HYPRE_Int *response_message_size )
{
   HYPRE_Int myid;
   HYPRE_Int i, index, count, elength;
   /* NOTE(review): buffer is reinterpreted as HYPRE_BigInt although the
      exchange was set up with sizeof(HYPRE_Int) elements - confirm the
      widths match in mixed-int builds. */
   HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf;
   hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro;
   hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2;
   hypre_MPI_Comm_rank(comm, &myid );
   /*check to see if we need to allocate more space in send_proc_obj for ids*/
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length += 10; /*add space for 10 more processors*/
      send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                                         send_proc_obj->storage_length, HYPRE_MEMORY_HOST);
      send_proc_obj->vec_starts =
         hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                        send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
   }
   /*initialize*/
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/
   /*send proc*/
   send_proc_obj->id[count] = contact_proc;
   /*do we need more storage for the elements?*/
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      /* grow by at least 10 elements past the current end */
      elength = hypre_max(contact_size, 10);
      elength += index;
      send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements,
                                               HYPRE_BigInt, elength, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = elength;
   }
   /*populate send_proc_obj*/
   for (i = 0; i < contact_size; i++)
   {
      send_proc_obj->elements[index++] = recv_contact_buf[i];
   }
   send_proc_obj->vec_starts[count + 1] = index;
   send_proc_obj->length++;
   /*output - no message to return (confirmation) */
   *response_message_size = 0;
   return hypre_error_flag;
}
/* -----------------------------------------------------------------------------
 * hypre_ParVectorLocalSumElts
 * Sum of this rank's local elements only (no global reduction).
 * ----------------------------------------------------------------------------- */
HYPRE_Complex hypre_ParVectorLocalSumElts( hypre_ParVector * vector )
{
   hypre_Vector *local = hypre_ParVectorLocalVector(vector);
   return hypre_SeqVectorSumElts(local);
}
/* Host-side gather of vector entries.
 * If indices is non-NULL, copies values[i] = data[indices[i] - base - first_index]
 * for each of the num_values global indices; indices falling outside this
 * rank's [first_index, last_index] range are counted as errors (their values[]
 * slots are left untouched) and reported once after the loop.
 * If indices is NULL, copies the first num_values local entries directly. */
HYPRE_Int
hypre_ParVectorGetValuesHost(hypre_ParVector *vector,
                             HYPRE_Int num_values,
                             HYPRE_BigInt *indices,
                             HYPRE_BigInt base,
                             HYPRE_Complex *values)
{
   HYPRE_Int i, ierr = 0;
   HYPRE_BigInt first_index = hypre_ParVectorFirstIndex(vector);
   HYPRE_BigInt last_index = hypre_ParVectorLastIndex(vector);
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(vector);
   HYPRE_Complex *data = hypre_VectorData(local_vector);
   /*
   if (hypre_VectorOwnsData(local_vector) == 0)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Vector does not own data! -- hypre_ParVectorGetValues.");
      return hypre_error_flag;
   }
   */
   if (indices)
   {
      /* ierr is a reduction so out-of-range hits can be tallied in parallel */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) reduction(+:ierr) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_values; i++)
      {
         HYPRE_BigInt index = indices[i] - base;
         if (index < first_index || index > last_index)
         {
            /* index not owned by this rank */
            ierr ++;
         }
         else
         {
            HYPRE_Int local_index = (HYPRE_Int) (index - first_index);
            values[i] = data[local_index];
         }
      }
      if (ierr)
      {
         hypre_error_in_arg(3);
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Index out of range! -- hypre_ParVectorGetValues.");
         hypre_printf("Index out of range! -- hypre_ParVectorGetValues\n");
      }
   }
   else
   {
      /* contiguous copy of the leading local entries */
      if (num_values > hypre_VectorSize(local_vector))
      {
         hypre_error_in_arg(2);
         return hypre_error_flag;
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_values; i++)
      {
         values[i] = data[i];
      }
   }
   return hypre_error_flag;
}
/* Gather vector entries at (indices - base) into values.
 * Dispatches on the vector's memory location: in CUDA/HIP builds the device
 * kernel is used when the data lives on the GPU; otherwise the host path runs. */
HYPRE_Int
hypre_ParVectorGetValues2(hypre_ParVector *vector,
                          HYPRE_Int num_values,
                          HYPRE_BigInt *indices,
                          HYPRE_BigInt base,
                          HYPRE_Complex *values)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (HYPRE_EXEC_DEVICE == hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(vector) ))
   {
      hypre_ParVectorGetValuesDevice(vector, num_values, indices, base, values);
   }
   else
#endif
   {
      hypre_ParVectorGetValuesHost(vector, num_values, indices, base, values);
   }
   return hypre_error_flag;
}
/* Convenience wrapper around hypre_ParVectorGetValues2 with a zero base. */
HYPRE_Int
hypre_ParVectorGetValues(hypre_ParVector *vector,
                         HYPRE_Int num_values,
                         HYPRE_BigInt *indices,
                         HYPRE_Complex *values)
{
   const HYPRE_BigInt base = 0;
   return hypre_ParVectorGetValues2(vector, num_values, indices, base, values);
}
|
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// Global value/target pairs for every fundamental type exercised below:
// each "*v" variable is the source operand and the matching "*x" variable
// is the target of an "#pragma omp atomic write" in main().
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;
// 16-byte integer vector target (element store checked via i128 cmpxchg).
typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;
// Bit-field targets, in both natural and __packed__ layouts, covering
// fields that do and do not line up with naturally aligned storage units.
struct BitFields {
  int : 32;
  int a : 31;
} bfx;
struct BitFields_packed {
  int : 32;
  int a : 31;
} __attribute__ ((__packed__)) bfx_packed;
struct BitFields2 {
  int : 31;
  int a : 1;
} bfx2;
struct BitFields2_packed {
  int : 31;
  int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;
struct BitFields3 {
  int : 11;
  int a : 14;
} bfx3;
struct BitFields3_packed {
  int : 11;
  int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;
struct BitFields4 {
  short : 16;
  int a: 1;
  long b : 7;
} bfx4;
struct BitFields4_packed {
  short : 16;
  int a: 1;
  long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;
// Two-element float ext-vector target.
typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;
// Register-pinned global used as an atomic-write index/source elsewhere.
register int rix __asm__("0");
int main() {
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
bx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
cx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
ucx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i16
#pragma omp atomic write
sx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i16
#pragma omp atomic write
usx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
#pragma omp atomic write
ix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
#pragma omp atomic write
uix = uiv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
lx = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
ulx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
llx = llv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
ullx = ullv;
// CHECK: load float, float*
// CHECK: bitcast float {{.*}} to i32
// CHECK: store atomic i32 {{.*}}, i32* bitcast (float*
#pragma omp atomic write
fx = fv;
// CHECK: load double, double*
// CHECK: bitcast double {{.*}} to i64
// CHECK: store atomic i64 {{.*}}, i64* bitcast (double*
#pragma omp atomic write
dx = dv;
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80*
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
// CHECK: store x86_fp80 [[LD]], x86_fp80* [[LDTEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: [[LD:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[LD]], i128* bitcast (x86_fp80*
#pragma omp atomic write
ldx = ldv;
// CHECK: [[REAL_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[REAL_VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 [[IMG_VAL]], i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cix = civ;
// CHECK: [[REAL_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
// CHECK: store float [[REAL_VAL]], float* [[TEMP_REAL_REF]]
// CHECK: store float [[IMG_VAL]], float* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ float, float }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cfx = cfv;
// CHECK: [[REAL_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
// CHECK: store double [[REAL_VAL]], double* [[TEMP_REAL_REF]]
// CHECK: store double [[IMG_VAL]], double* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 16, i8* bitcast ({ double, double }* @{{.*}} to i8*), i8* [[BITCAST]], i32 5)
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst write
cdx = cdv;
// CHECK: load i8, i8*
// CHECK: store atomic i64
#pragma omp atomic write
ulx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
bx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write, seq_cst
cx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i64
#pragma omp atomic write
ulx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i64
#pragma omp atomic write
lx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst, write
uix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
#pragma omp atomic write
ix = uiv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = trunc i64 %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cix = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i32 %{{.+}}, i32* bitcast (float*
#pragma omp atomic write
fx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double*
#pragma omp atomic write
dx = llv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = uitofp i64 %{{.+}} to x86_fp80
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
// CHECK: store x86_fp80 [[VAL]], x86_fp80* [[TEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
// CHECK: [[VAL:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[VAL]], i128* bitcast (x86_fp80*
#pragma omp atomic write
ldx = ullv;
// CHECK: load float, float*
// CHECK: [[VAL:%.+]] = fptosi float %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cix = fv;
// CHECK: load double, double*
// CHECK: store atomic i16
#pragma omp atomic write
sx = dv;
// CHECK: load x86_fp80, x86_fp80*
// CHECK: store atomic i8
#pragma omp atomic write
bx = ldv;
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0)
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1)
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: or i1
// CHECK: store atomic i8
#pragma omp atomic write
bx = civ;
// CHECK: load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: store atomic i16
#pragma omp atomic write
usx = cfv;
// CHECK: load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0)
// CHECK: store atomic i64
#pragma omp atomic write
llx = cdv;
// CHECK-DAG: [[IDX:%.+]] = load i16, i16* @{{.+}}
// CHECK-DAG: load i8, i8*
// CHECK-DAG: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I128:%.+]] = phi i128 [ [[I128VAL]], %{{.+}} ], [ [[FAILED_I128_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[OLD_I128]], i128* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[VEC_ITEM_VAL]], i16 [[IDX]]
// CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_I128:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic
// CHECK: [[FAILED_I128_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
int4x[sv] = bv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST]], i32 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]],
// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[LDTEMP1:%.+]],
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP1]],
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 [[OLD_BF_VALUE]], -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP1]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP1]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, 2147483647
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24*
// CHECK: [[BITCAST:%.+]] = bitcast i24* %{{.+}} to i8*
// CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST]], i32 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_VAL:%.+]] = load i24, i24* %{{.+}},
// CHECK: store i24 [[OLD_VAL]], i24* [[TEMP:%.+]],
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24
// CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3
// CHECK: [[BF_CLEAR:%.+]] = and i24 %{{.+}}, -131065
// CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i24 %{{.+}}, i24* [[TEMP]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[TEMP]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64
// CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -65537
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_VALUE:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, -2
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i64 [[NEW_VAL]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 17
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -16646145
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.b = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 1
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4_packed.b = ldv;
// CHECK: load i64, i64*
// CHECK: [[VEC_ITEM_VAL:%.+]] = uitofp i64 %{{.+}} to float
// CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I64:%.+]] = phi i64 [ [[I64VAL]], %{{.+}} ], [ [[FAILED_I64_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[OLD_I64]], i64* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0
// CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_I64:%.+]] = load i64, i64* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic
// CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
float2x.x = ulv;
// CHECK: call i32 @llvm.read_register.i32(
// CHECK: sitofp i32 %{{.+}} to double
// CHECK: bitcast double %{{.+}} to i64
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* @{{.+}} to i64*) seq_cst
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write seq_cst
dv = rix;
return 0;
}
#endif
|
dd_dtw_openmp.c | /*!
@file dd_dtw_openmp.c
@brief DTAIDistance.dtw
@author Wannes Meert
@copyright Copyright © 2020 Wannes Meert. Apache License, Version 2.0, see LICENSE for details.
*/
#include "dd_dtw_openmp.h"
/**
 Check the arguments passed to dtw_distances_* and prepare the arrays of indices to be used.
 The indices are created upfront to allow for easy parallelization.

 @param block Block to indicate which series to compare.
 @param nb_series Number of series
 @param cbs Output: column begin indices for a row series index (allocated here, owned by the caller on success)
 @param rls Output: location start for row in distances array (allocated here, owned by the caller on success)
 @param length Output: length of (compact) distances matrix
 @param settings Settings for DTW (unused here; kept for API symmetry with the callers)
 @return 0 if all is ok, other number if not.
 */
int dtw_distances_prepare(DTWBlock *block, idx_t nb_series, idx_t **cbs, idx_t **rls, idx_t *length, DTWSettings *settings) {
    idx_t cb, rs, ir;

    *length = dtw_distances_length(block, nb_series);
    if (*length == 0) {  // was `length == 0`: compared the (never-NULL) pointer, so the check never fired
        return 1;
    }
    // Correct block: an end index of 0 means "up to the last series".
    if (block->re == 0) {
        block->re = nb_series;
    }
    if (block->ce == 0) {
        block->ce = nb_series;
    }
    if (block->re <= block->rb) {
        *length = 0;
        return 1;
    }
    if (block->ce <= block->cb) {
        *length = 0;
        return 1;
    }
    *cbs = (idx_t *)malloc(sizeof(idx_t) * (block->re - block->rb));
    if (!*cbs) {  // was `!cbs`: tested the out-parameter itself instead of the allocation
        printf("Error: dtw_distances_* - cannot allocate memory (cbs length = %zu)", block->re - block->rb);
        *length = 0;
        return 1;
    }
    *rls = (idx_t *)malloc(sizeof(idx_t) * (block->re - block->rb));
    if (!*rls) {  // was `!rls`: same pointer-vs-allocation bug
        printf("Error: dtw_distances_* - cannot allocate memory (rls length = %zu)", block->re - block->rb);
        free(*cbs);  // do not leak the first allocation on partial failure
        *cbs = NULL;
        *length = 0;
        return 1;
    }
    ir = 0;
    rs = 0;
    assert(block->rb < block->re);
    for (idx_t r=block->rb; r<block->re; r++) {
        // Upper-triangular: only compare columns > row, clipped to the block's column range.
        if (r + 1 > block->cb) {
            cb = r+1;
        } else {
            cb = block->cb;
        }
        (*cbs)[ir] = cb;
        (*rls)[ir] = rs;   // running offset of this row's results in the compact output
        rs += block->ce - cb;
        ir += 1;
    }
    return 0;
}
/*!
Distance matrix for DTW, computed in parallel over a list of pointers to series.
Writes the condensed (upper-triangular) distances into `output`.

@see dtw_distances_ptrs
*/
idx_t dtw_distances_ptrs_parallel(seq_t **ptrs, idx_t nb_ptrs, idx_t* lengths, seq_t* output,
                                  DTWBlock* block, DTWSettings* settings) {
#if defined(_OPENMP)
    idx_t length;
    idx_t *cbs, *rls;
    idx_t row, col, ri, ci;
    if (dtw_distances_prepare(block, nb_ptrs, &cbs, &rls, &length, settings) != 0) {
        return 0;
    }
    // Rows have different lengths (upper triangular matrix) and the first rows
    // are always the longest, so guided scheduling keeps threads with short
    // rows from waiting on threads with long rows. schedule("static, 1") is
    // similarly fast (neighboring rows are near-equal in length) but assumes
    // every DTW computation costs the same.
    #pragma omp parallel for private(ri, ci, row, col) schedule(guided)
    for (ri = 0; ri < (block->re - block->rb); ri++) {
        row = block->rb + ri;
        ci = 0;
        for (col = cbs[ri]; col < block->ce; col++) {
            double d = dtw_distance(ptrs[row], lengths[row],
                                    ptrs[col], lengths[col], settings);
            output[rls[ri] + ci] = d;
            ci++;
        }
    }
    free(cbs);
    free(rls);
    return length;
#else
    printf("ERROR: DTAIDistanceC is compiled without OpenMP support.\n");
    return 0;
#endif
}
/*!
Distance matrix for n-dimensional DTW, computed in parallel over a list of
pointers to series. Writes the condensed distances into `output`.

@see dtw_distances_ndim_ptrs
*/
idx_t dtw_distances_ndim_ptrs_parallel(seq_t **ptrs, idx_t nb_ptrs, idx_t* lengths, int ndim, seq_t* output,
                                       DTWBlock* block, DTWSettings* settings) {
#if defined(_OPENMP)
    idx_t length;
    idx_t *cbs, *rls;
    idx_t row, col, ri, ci;
    if (dtw_distances_prepare(block, nb_ptrs, &cbs, &rls, &length, settings) != 0) {
        return 0;
    }
    // Guided scheduling: row lengths shrink toward the end of the triangle.
    #pragma omp parallel for private(ri, ci, row, col) schedule(guided)
    for (ri = 0; ri < (block->re - block->rb); ri++) {
        row = block->rb + ri;
        ci = 0;
        for (col = cbs[ri]; col < block->ce; col++) {
            double d = dtw_distance_ndim(ptrs[row], lengths[row],
                                         ptrs[col], lengths[col],
                                         ndim, settings);
            output[rls[ri] + ci] = d;
            ci++;
        }
    }
    free(cbs);
    free(rls);
    return length;
#else
    printf("ERROR: DTAIDistanceC is compiled without OpenMP support.\n");
    return 0;
#endif
}
/*!
Distance matrix for DTW, computed in parallel on a 2-dimensional array of
series stored row-major (nb_rows x nb_cols).

@see dtw_distances_matrix
*/
idx_t dtw_distances_matrix_parallel(seq_t *matrix, idx_t nb_rows, idx_t nb_cols, seq_t* output, DTWBlock* block, DTWSettings* settings) {
#if defined(_OPENMP)
    idx_t length;
    idx_t *cbs, *rls;
    idx_t row, col, ri, ci;
    if (dtw_distances_prepare(block, nb_rows, &cbs, &rls, &length, settings) != 0) {
        return 0;
    }
    // Guided scheduling: row lengths shrink toward the end of the triangle.
    #pragma omp parallel for private(ri, ci, row, col) schedule(guided)
    for (ri = 0; ri < (block->re - block->rb); ri++) {
        row = block->rb + ri;
        ci = 0;
        for (col = cbs[ri]; col < block->ce; col++) {
            // Each series is one row of the matrix, nb_cols values long.
            double d = dtw_distance(&matrix[row*nb_cols], nb_cols,
                                    &matrix[col*nb_cols], nb_cols, settings);
            output[rls[ri] + ci] = d;
            ci++;
        }
    }
    free(cbs);
    free(rls);
    return length;
#else
    printf("ERROR: DTAIDistanceC is compiled without OpenMP support.\n");
    return 0;
#endif
}
/*!
Distance matrix for n-dimensional DTW, computed in parallel on a
3-dimensional array (nb_rows x nb_cols x ndim), stored contiguously.

@see dtw_distances_ndim_matrix
*/
idx_t dtw_distances_ndim_matrix_parallel(seq_t *matrix, idx_t nb_rows, idx_t nb_cols, int ndim, seq_t* output, DTWBlock* block, DTWSettings* settings) {
#if defined(_OPENMP)
    idx_t length;
    idx_t *cbs, *rls;
    idx_t row, col, ri, ci;
    if (dtw_distances_prepare(block, nb_rows, &cbs, &rls, &length, settings) != 0) {
        return 0;
    }
    // Guided scheduling: row lengths shrink toward the end of the triangle.
    #pragma omp parallel for private(ri, ci, row, col) schedule(guided)
    for (ri = 0; ri < (block->re - block->rb); ri++) {
        row = block->rb + ri;
        ci = 0;
        for (col = cbs[ri]; col < block->ce; col++) {
            // Each series occupies nb_cols*ndim consecutive values.
            double d = dtw_distance_ndim(&matrix[row*nb_cols*ndim], nb_cols,
                                         &matrix[col*nb_cols*ndim], nb_cols,
                                         ndim, settings);
            output[rls[ri] + ci] = d;
            ci++;
        }
    }
    free(cbs);
    free(rls);
    return length;
#else
    printf("ERROR: DTAIDistanceC is compiled without OpenMP support.\n");
    return 0;
#endif
}
|
alignment.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/* Original code from the Application Kernel Matrix by Cray */
/* that was based on the ClustalW application */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <libgen.h>
#include "sequence.h"
#include "param.h"
#include "alignment.h"
#include "bots.h"
/* K-tuple search parameters (set in pairalign_init for DNA or protein). */
int ktup, window, signif;
int prot_ktup, prot_window, prot_signif;
/* gap_pos1/gap_pos2: residue codes reserved for gaps (see init_matrix);
   mat_avscore: negated average mismatch score derived in get_matrix. */
int gap_pos1, gap_pos2, mat_avscore;
/* nseqs: number of input sequences; max_aa: highest residue index used. */
int nseqs, max_aa;
/* Upper bound on sequence length; sizes the fixed DP buffers below. */
#define MAX_ALN_LENGTH 5000
/* seqlen_array: per-sequence lengths (1-based); def_aa_xref: maps
   amino_acid_order positions to amino_acid_codes indices. */
int *seqlen_array, def_aa_xref[NUMRES+1];
/* Result matrices (nseqs x nseqs): parallel run vs sequential reference. */
int *bench_output, *seq_output;
double gap_open, gap_extend;
double prot_gap_open, prot_gap_extend;
/* Pairwise gap open / extend penalties (set in pairalign_init). */
double pw_go_penalty, pw_ge_penalty;
double prot_pw_go_penalty, prot_pw_ge_penalty;
/* seq_array holds the sequences; indexing is 1-based in both dimensions. */
char **args, **names, **seq_array;
/* Scaled substitution matrix filled by get_matrix. */
int matrix[NUMRES][NUMRES];
double gap_open_scale;
double gap_extend_scale;
// dnaFlag default value is false
int dnaFlag = FALSE;
// clustalw default value is false
int clustalw = FALSE;
/* Fixed-point scale used when converting double penalties to ints. */
#define INT_SCALE 100
#define MIN(a,b) ((a)<(b)?(a):(b))
/* Gap cost helpers: open (tb/te) plus extend (gh) per position.
   They rely on tb, te and gh being in scope at the point of use. */
#define tbgap(k) ((k) <= 0 ? 0 : tb + gh * (k))
#define tegap(k) ((k) <= 0 ? 0 : te + gh * (k))
/***********************************************************************
 * Record a deletion of k positions in the alignment edit script.
 * Deletions are stored as negative entries in displ; a deletion that
 * immediately follows another is merged into the previous entry.
 **********************************************************************/
void del(int k, int *print_ptr, int *last_print, int *displ)
{
  if (*last_print < 0) {
    /* Previous op was already a deletion: extend it by k. */
    displ[*print_ptr - 1] -= k;
    *last_print = displ[*print_ptr - 1];
  } else {
    /* Start a new deletion entry. */
    displ[*print_ptr] = -k;
    *last_print = -k;
    (*print_ptr)++;
  }
}
/***********************************************************************
 * Record an insertion of v positions in the alignment edit script.
 * If the previous op was a deletion (negative last_print), the
 * insertion is written before it and the pending deletion re-appended
 * so it stays the trailing entry.
 **********************************************************************/
void add(int v, int *print_ptr, int *last_print, int *displ)
{
  if (*last_print >= 0) {
    /* Plain insertion: append v and remember it as the last op. */
    displ[*print_ptr] = v;
    (*print_ptr)++;
    *last_print = v;
  } else {
    displ[*print_ptr - 1] = v;
    displ[*print_ptr] = *last_print;
    (*print_ptr)++;
  }
}
/***********************************************************************
 * Substitution score for position (v1+iat) of sequence seq1 against
 * position (v2+jat) of sequence seq2, looked up in the global `matrix`.
 **********************************************************************/
int calc_score(int iat, int jat, int v1, int v2, int seq1, int seq2)
{
  int row = seq_array[seq1][v1 + iat];
  int col = seq_array[seq2][v2 + jat];
  return matrix[row][col];
}
/***********************************************************************
 * Load a packed lower-triangular substitution table (matptr) into the
 * global NUMRES x NUMRES `matrix`, remapping residue indices through
 * xref and multiplying every entry by `scale`. Also derives the
 * average mismatch score (stored negated in mat_avscore) and fills in
 * the rows/columns for the two gap codes. Returns the number of mapped
 * residues plus the two gap codes; callers treat 0 as failure.
 **********************************************************************/
int get_matrix(int *matptr, int *xref, int scale)
{
  int gg_score = 0;   /* score for gap vs gap */
  int gr_score = 0;   /* score for gap vs residue */
  int i, j, k, ti, tj, ix;
  int av1, av2, av3, min, max, maxres;
  /* Start from an all-zero matrix. */
  for (i = 0; i <= max_aa; i++)
    for (j = 0; j <= max_aa; j++) matrix[i][j] = 0;
  ix = 0;
  maxres = 0;
  /* matptr stores the lower triangle row by row; ix walks it linearly.
     xref maps matrix positions to residue codes (-1 = unmapped). */
  for (i = 0; i <= max_aa; i++) {
    ti = xref[i];
    for (j = 0; j <= i; j++) {
      tj = xref[j];
      if ((ti != -1) && (tj != -1)) {
        k = matptr[ix];
        if (ti == tj) {
          matrix[ti][ti] = k * scale;
          maxres++;   /* count one residue per diagonal entry */
        } else {
          matrix[ti][tj] = k * scale;
          matrix[tj][ti] = k * scale;
        }
        ix++;
      }
    }
  }
  maxres--;
  /* av1: overall average, av2: diagonal (match) average,
     av3: off-diagonal (mismatch) average. Integer division is intended. */
  av1 = av2 = av3 = 0;
  for (i = 0; i <= max_aa; i++) {
    for (j = 0; j <= i; j++) {
      av1 += matrix[i][j];
      if (i == j) av2 += matrix[i][j];
      else av3 += matrix[i][j];
    }
  }
  av1 /= (maxres*maxres)/2;
  av2 /= maxres;
  av3 /= (int) (((double)(maxres*maxres-maxres))/2);
  mat_avscore = -av3;
  /* min/max of the triangle are computed but not used further here. */
  min = max = matrix[0][0];
  for (i = 0; i <= max_aa; i++)
    for (j = 1; j <= i; j++) {
      if (matrix[i][j] < min) min = matrix[i][j];
      if (matrix[i][j] > max) max = matrix[i][j];
    }
  /* Gap codes score gr_score against residues and gg_score against gaps. */
  for (i = 0; i < gap_pos1; i++) {
    matrix[i][gap_pos1] = gr_score;
    matrix[gap_pos1][i] = gr_score;
    matrix[i][gap_pos2] = gr_score;
    matrix[gap_pos2][i] = gr_score;
  }
  matrix[gap_pos1][gap_pos1] = gg_score;
  matrix[gap_pos2][gap_pos2] = gg_score;
  matrix[gap_pos2][gap_pos1] = gg_score;
  matrix[gap_pos1][gap_pos2] = gg_score;
  /* Account for the two gap codes in the returned residue count. */
  maxres += 2;
  return(maxres);
}
/***********************************************************************
 * Smith-Waterman forward pass over ia[1..n] x ib[1..m]: finds the best
 * local-alignment score (*maxscore) and its end cell (*se1, *se2).
 * g = gap-open penalty, gh = gap-extend penalty; scores come from the
 * global substitution `matrix`.
 **********************************************************************/
void forward_pass(char *ia, char *ib, int n, int m, int *se1, int *se2, int *maxscore, int g, int gh)
{
  int HH[MAX_ALN_LENGTH];   /* best score ending at column j in current row */
  int DD[MAX_ALN_LENGTH];   /* best score ending in a vertical gap */
  int row, col, fgap, diag, tmp, score;

  *maxscore = 0;
  *se1 = 0;
  *se2 = 0;
  for (col = 0; col <= m; col++) {
    HH[col] = 0;
    DD[col] = -g;
  }
  for (row = 1; row <= n; row++) {
    score = 0;
    diag = 0;        /* HH value of the previous row, previous column */
    fgap = -g;       /* best score ending in a horizontal gap */
    for (col = 1; col <= m; col++) {
      fgap -= gh;
      tmp = score - g - gh;
      if (fgap < tmp) fgap = tmp;
      DD[col] -= gh;
      tmp = HH[col] - g - gh;
      if (DD[col] < tmp) DD[col] = tmp;
      score = diag + matrix[(int)ia[row]][(int)ib[col]];
      if (score < fgap) score = fgap;
      if (score < DD[col]) score = DD[col];
      if (score < 0) score = 0;   /* local alignment: never go negative */
      diag = HH[col];
      HH[col] = score;
      if (score > *maxscore) {
        *maxscore = score;
        *se1 = row;
        *se2 = col;
      }
    }
  }
}
/***********************************************************************
 * Smith-Waterman reverse pass: starting from the end cell (se1, se2)
 * found by forward_pass, sweep backwards to locate the start cell
 * (*sb1, *sb2) of the optimal local alignment. Stops as soon as the
 * forward-pass score (maxscore) is reached. g = gap-open penalty,
 * gh = gap-extend penalty; scores come from the global `matrix`.
 **********************************************************************/
void reverse_pass(char *ia, char *ib, int se1, int se2, int *sb1, int *sb2, int maxscore, int g, int gh)
{
  int i, j, f, p, t, hh, cost;
  int HH[MAX_ALN_LENGTH];   /* best score starting at column j (current row) */
  int DD[MAX_ALN_LENGTH];   /* best score starting inside a vertical gap */
  cost = 0;
  *sb1 = *sb2 = 1;
  for (i = se2; i > 0; i--){ HH[i] = -1; DD[i] = -1;}
  for (i = se1; i > 0; i--) {
    hh = f = -1;
    /* Only the end cell itself may start with score 0. */
    if (i == se1) p = 0; else p = -1;
    for (j = se2; j > 0; j--) {
      /* f: best score starting inside a horizontal gap */
      f -= gh;
      t = hh - g - gh;
      if (f < t) f = t;
      DD[j] -= gh;
      t = HH[j] - g - gh;
      if (DD[j] < t) DD[j] = t;
      hh = p + matrix[(int)ia[i]][(int)ib[j]];
      if (hh < f) hh = f;
      if (hh < DD[j]) hh = DD[j];
      p = HH[j];
      HH[j] = hh;
      if (hh > cost) {
        cost = hh; *sb1 = i; *sb2 = j;
        /* Start cell found once the forward-pass score is matched. */
        if (cost >= maxscore) break;
      }
    }
    if (cost >= maxscore) break;
  }
}
/***********************************************************************
 * Myers-Miller linear-space divide-and-conquer alignment of
 * seq1[A+1..A+M] against seq2[B+1..B+N]. tb/te are the gap-open
 * penalties charged at the beginning/end of this segment; g/gh are the
 * gap open/extend penalties. The edit script is appended to displ via
 * add()/del() (through print_ptr/last_print) and the segment's
 * alignment score is returned.
 * Strategy: locate the midpoint of an optimal path with one forward
 * and one backward DP sweep, then recurse on the two halves.
 **********************************************************************/
int diff (int A, int B, int M, int N, int tb, int te, int *print_ptr, int *last_print, int *displ, int seq1, int seq2, int g, int gh)
{
  int i, j, f, e, s, t, hh;
  int midi, midj, midh, type;
  int HH[MAX_ALN_LENGTH];   /* forward scores */
  int DD[MAX_ALN_LENGTH];   /* forward scores ending in a vertical gap */
  int RR[MAX_ALN_LENGTH];   /* backward scores */
  int SS[MAX_ALN_LENGTH];   /* backward scores ending in a vertical gap */

  /* Base case: nothing left in seq2 -> delete the rest of seq1. */
  if (N <= 0) {
    if (M > 0) del(M, print_ptr, last_print, displ);
    return( - (int) tbgap(M));
  }
  if (M <= 1) {
    /* Base case: nothing left in seq1 -> insert the rest of seq2. */
    if (M <= 0) {
      add(N, print_ptr, last_print, displ);
      return( - (int)tbgap(N));
    }
    /* Single residue of seq1 vs N residues of seq2: either it sits in
       a gap (two candidate costs below) or matches some position j. */
    midh = -(tb+gh) - tegap(N);
    hh = -(te+gh) - tbgap(N);
    if (hh > midh) midh = hh;
    midj = 0;
    for (j = 1; j <= N; j++) {
      hh = calc_score(1,j,A,B,seq1,seq2) - tegap(N-j) - tbgap(j-1);
      if (hh > midh) {midh = hh; midj = j;}
    }
    if (midj == 0) {
      /* Best option keeps the residue in a gap. */
      del(1, print_ptr, last_print, displ);
      add(N, print_ptr, last_print, displ);
    } else {
      if (midj > 1) add(midj-1, print_ptr, last_print, displ);
      displ[(*print_ptr)++] = *last_print = 0;   /* 0 marks a match column */
      if (midj < N) add(N-midj, print_ptr, last_print, displ);
    }
    return midh;
  }

  /* Forward sweep over the first half of seq1 (rows 1..midi). */
  midi = M / 2;
  HH[0] = 0;   /* fixed: was the double literal 0.0 assigned to an int */
  t = -tb;
  for (j = 1; j <= N; j++) {
    HH[j] = t = t - gh;
    DD[j] = t - g;
  }
  t = -tb;
  for (i = 1; i <= midi; i++) {
    s = HH[0];
    HH[0] = hh = t = t - gh;
    f = t - g;
    for (j = 1; j <= N; j++) {
      if ((hh = hh - g - gh) > (f = f - gh)) f = hh;
      if ((hh = HH[j] - g - gh) > (e = DD[j]- gh)) e = hh;
      hh = s + calc_score(i,j,A,B,seq1,seq2);
      if (f > hh) hh = f;
      if (e > hh) hh = e;
      s = HH[j];
      HH[j] = hh;
      DD[j] = e;
    }
  }
  DD[0] = HH[0];

  /* Backward sweep over the second half of seq1 (rows M-1..midi). */
  RR[N] = 0;
  t = -te;
  for (j = N-1; j >= 0; j--) {RR[j] = t = t - gh; SS[j] = t - g;}
  t = -te;
  for (i = M - 1; i >= midi; i--) {
    s = RR[N];
    RR[N] = hh = t = t-gh;
    f = t - g;
    for (j = N - 1; j >= 0; j--) {
      if ((hh = hh - g - gh) > (f = f - gh)) f = hh;
      if ((hh = RR[j] - g - gh) > (e = SS[j] - gh)) e = hh;
      hh = s + calc_score(i+1,j+1,A,B,seq1,seq2);
      if (f > hh) hh = f;
      if (e > hh) hh = e;
      s = RR[j];
      RR[j] = hh;
      SS[j] = e;
    }
  }
  SS[N] = RR[N];

  /* Find the column where forward + backward paths meet best.
     type 1: the paths join at a residue; type 2: they join inside a
     vertical gap that spans the midpoint (gap-open charged once: +g). */
  midh = HH[0] + RR[0];
  midj = 0;
  type = 1;
  for (j = 0; j <= N; j++) {
    hh = HH[j] + RR[j];
    if (hh >= midh)
      if (hh > midh || (HH[j] != DD[j] && RR[j] == SS[j]))
        {midh = hh; midj = j;}
  }
  for (j = N; j >= 0; j--) {
    hh = DD[j] + SS[j] + g;
    if (hh > midh) {midh = hh;midj = j;type = 2;}
  }

  /* Recurse on both halves. (The int literals 0 below were previously
     the double literals 0.0 implicitly converted to int.) */
  if (type == 1) {
    diff(A, B, midi, midj, tb, g, print_ptr, last_print, displ, seq1, seq2, g, gh);
    diff(A+midi, B+midj, M-midi, N-midj, g, te, print_ptr, last_print, displ, seq1, seq2, g, gh);
  } else {
    diff(A, B, midi-1, midj, tb, 0, print_ptr, last_print, displ, seq1, seq2, g, gh);
    del(2, print_ptr, last_print, displ);
    diff(A+midi+1, B+midj, M-midi-1, N-midj, 0, te, print_ptr, last_print, displ, seq1, seq2, g, gh);
  }
  return midh;
}
/***********************************************************************
 * Walk the edit script in displ (built by diff) starting at positions
 * (tsb1, tsb2) of seq1/seq2 and count the identical, non-gap matched
 * residues. Returns 100 * count; the caller normalizes by sequence
 * length to obtain percent identity.
 **********************************************************************/
double tracepath(int tsb1, int tsb2, int *print_ptr, int *displ, int seq1, int seq2)
{
  int idx, gap;
  int p1 = tsb1;       /* current position in seq1 */
  int p2 = tsb2;       /* current position in seq2 */
  int pos = 0;
  int count = 0;       /* identical matched residues */

  for (idx = 1; idx <= *print_ptr - 1; ++idx) {
    if (displ[idx] == 0) {
      /* 0 entry: one column aligning a residue of each sequence. */
      char c1 = seq_array[seq1][p1];
      char c2 = seq_array[seq2][p2];
      if ((c1 != gap_pos1) && (c1 != gap_pos2) && (c1 == c2)) count++;
      ++p1;
      ++p2;
      ++pos;
    } else if ((gap = displ[idx]) > 0) {
      /* Positive entry: gap in seq1, advance seq2. */
      p2 += gap;
      pos += gap;
    } else {
      /* Negative entry: gap in seq2, advance seq1. */
      p1 -= gap;
      pos -= gap;
    }
  }
  return (100.0 * (double) count);
}
/*
 * Align every pair of sequences (si, sj) with si < sj and store the
 * normalized score (as an int) in bench_output[si*nseqs+sj].
 * The outer row loop is an OpenMP worksharing loop; every pairwise
 * alignment is spawned as an untied task inside it.
 * Returns 0 on success, -1 if the substitution matrix could not be built.
 */
int pairalign()
{
  int i, n, m, si, sj;
  int len1, len2, maxres;
  double gg, mm_score;
  int *mat_xref, *matptr;
  matptr = gon250mt;          /* gon250mt substitution table, scaled by 10 */
  mat_xref = def_aa_xref;
  maxres = get_matrix(matptr, mat_xref, 10);
  if (maxres == 0) return(-1);
  bots_message("Start aligning ");
  #pragma omp parallel
{
  #pragma omp for schedule(dynamic) private(i,n,si,sj,len1,m)
  for (si = 0; si < nseqs; si++) {
    n = seqlen_array[si+1];
    /* len1 = residues of sequence si that are not gap codes */
    for (i = 1, len1 = 0; i <= n; i++) {
      char c = seq_array[si+1][i];
      if ((c != gap_pos1) && (c != gap_pos2)) len1++;
    }
    for (sj = si + 1; sj < nseqs; sj++)
    {
      m = seqlen_array[sj+1];
      if ( n == 0 || m == 0 ) {
        bench_output[si*nseqs+sj] = (int) 1.0;   /* empty-sequence sentinel */
      } else {
        #pragma omp task untied \
        private(i,gg,len2,mm_score) firstprivate(m,n,si,sj,len1) \
        shared(nseqs, bench_output,seqlen_array,seq_array,gap_pos1,gap_pos2,pw_ge_penalty,pw_go_penalty,mat_avscore)
        {
          int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh;
          int displ[2*MAX_ALN_LENGTH+1];   /* per-task edit script buffer */
          int print_ptr, last_print;
          /* len2 = residues of sequence sj that are not gap codes */
          for (i = 1, len2 = 0; i <= m; i++) {
            char c = seq_array[sj+1][i];
            if ((c != gap_pos1) && (c != gap_pos2)) len2++;
          }
          /* Derive the integer gap penalties g (open) and gh (extend). */
          if ( dnaFlag == TRUE ) {
            g = (int) ( 2 * INT_SCALE * pw_go_penalty * gap_open_scale ); // gapOpen
            gh = (int) (INT_SCALE * pw_ge_penalty * gap_extend_scale); //gapExtend
          } else {
            gg = pw_go_penalty + log((double) MIN(n, m)); // temporary value
            g = (int) ((mat_avscore <= 0) ? (2 * INT_SCALE * gg) : (2 * mat_avscore * gg * gap_open_scale) ); // gapOpen
            gh = (int) (INT_SCALE * pw_ge_penalty); //gapExtend
          }
          seq1 = si + 1;
          seq2 = sj + 1;
          /* Locate the optimal local alignment, then rebuild its path. */
          forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh);
          reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh);
          print_ptr = 1;
          last_print = 0;
          diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh);
          mm_score = tracepath(sb1, sb2, &print_ptr, displ, seq1, seq2);
          /* Normalize by the shorter gap-free length. */
          if (len1 == 0 || len2 == 0) mm_score = 0.0;
          else mm_score /= (double) MIN(len1,len2);
          bench_output[si*nseqs+sj] = (int) mm_score;
        } // end task
      } // end if (n == 0 || m == 0)
    } // for (j)
  } // end parallel for (i)
} // end parallel
  bots_message(" completed!\n");
  return 0;
}
/*
 * Sequential reference implementation of pairalign(): aligns every pair
 * (si, sj) with si < sj and stores the score in seq_output so that
 * align_verify() can compare it against the parallel result.
 * Returns 0 on success, -1 if the substitution matrix could not be built.
 */
int pairalign_seq()
{
  int i, n, m, si, sj;
  int len1, len2, maxres;
  double gg, mm_score;
  int *mat_xref, *matptr;
  matptr = gon250mt;          /* gon250mt substitution table, scaled by 10 */
  mat_xref = def_aa_xref;
  maxres = get_matrix(matptr, mat_xref, 10);
  if (maxres == 0) return(-1);
  for (si = 0; si < nseqs; si++) {
    n = seqlen_array[si+1];
    /* len1 = residues of sequence si that are not gap codes */
    for (i = 1, len1 = 0; i <= n; i++) {
      char c = seq_array[si+1][i];
      if ((c != gap_pos1) && (c != gap_pos2)) len1++;
    }
    for (sj = si + 1; sj < nseqs; sj++) {
      m = seqlen_array[sj+1];
      if ( n == 0 || m == 0) {
        seq_output[si*nseqs+sj] = (int) 1.0;   /* empty-sequence sentinel */
      } else {
        int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh;
        int displ[2*MAX_ALN_LENGTH+1];   /* edit script buffer */
        int print_ptr, last_print;
        /* len2 = residues of sequence sj that are not gap codes */
        for (i = 1, len2 = 0; i <= m; i++) {
          char c = seq_array[sj+1][i];
          if ((c != gap_pos1) && (c != gap_pos2)) len2++;
        }
        /* Derive the integer gap penalties g (open) and gh (extend). */
        if ( dnaFlag == TRUE ) {
          g = (int) ( 2 * INT_SCALE * pw_go_penalty * gap_open_scale ); // gapOpen
          gh = (int) (INT_SCALE * pw_ge_penalty * gap_extend_scale); //gapExtend
        } else {
          gg = pw_go_penalty + log((double) MIN(n, m)); // temporary value
          g = (int) ((mat_avscore <= 0) ? (2 * INT_SCALE * gg) : (2 * mat_avscore * gg * gap_open_scale) ); // gapOpen
          gh = (int) (INT_SCALE * pw_ge_penalty); //gapExtend
        }
        seq1 = si + 1;
        seq2 = sj + 1;
        /* Locate the optimal local alignment, then rebuild its path. */
        forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh);
        reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh);
        print_ptr = 1;
        last_print = 0;
        diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh);
        mm_score = tracepath(sb1, sb2, &print_ptr, displ, seq1, seq2);
        /* Normalize by the shorter gap-free length. */
        if (len1 == 0 || len2 == 0) mm_score = 0.0;
        else mm_score /= (double) MIN(len1,len2);
        seq_output[si*nseqs+sj] = (int) mm_score;
      }
    }
  }
  return 0;
}
/***********************************************************************
* :
**********************************************************************/
/* Set up the gap codes and build the residue cross-reference table used by
 * get_matrix(): def_aa_xref[i] maps position i of amino_acid_order to the
 * index of the same letter in amino_acid_codes, or -1 when absent. */
void init_matrix(void)
{
   int i;
   gap_pos1 = NUMRES - 2;                 /* code for gap type 1 */
   gap_pos2 = NUMRES - 1;                 /* code for gap type 2 */
   max_aa = strlen(amino_acid_codes) - 2;
   for (i = 0; i < NUMRES; i++)
      def_aa_xref[i] = -1;                /* default: residue not listed */
   for (i = 0; amino_acid_order[i] != '\0'; i++) {
      int j = 0;
      while (amino_acid_codes[j] != '\0') {
         if (amino_acid_order[i] == amino_acid_codes[j]) {
            def_aa_xref[i] = j;
            break;
         }
         j++;
      }
   }
}
void pairalign_init (char *filename)
{
int i;
if (!filename || !filename[0]) {
bots_error(0, "Please specify an input file with the -f option\n");
}
init_matrix();
nseqs = readseqs(filename);
bots_message("Multiple Pairwise Alignment (%d sequences)\n", nseqs);
for (i = 1; i <= nseqs; i++)
bots_debug("Sequence %d: %s %6.d aa\n", i, names[i], seqlen_array[i]);
if ( clustalw == TRUE ) {
gap_open_scale = 0.6667;
gap_extend_scale = 0.751;
} else {
gap_open_scale = 1.0;
gap_extend_scale = 1.0;
}
if ( dnaFlag == TRUE ) {
// Using DNA parameters
ktup = 2;
window = 4;
signif = 4;
gap_open = 15.00;
gap_extend = 6.66;
pw_go_penalty = 15.00;
pw_ge_penalty = 6.66;
} else {
// Using protein parameters
ktup = 1;
window = 5;
signif = 5;
gap_open = 10.0;
gap_extend = 0.2;
pw_go_penalty = 10.0;
pw_ge_penalty = 0.1;
}
}
/* Allocate and zero the nseqs x nseqs score matrix filled in by the
 * benchmarked (parallel) alignment run.
 * Improvements: calloc allocates AND zeroes in one call, replacing the
 * manual O(n^2) initialisation loop, and the allocation is now checked. */
void align_init ()
{
   bench_output = (int *) calloc((size_t)nseqs * (size_t)nseqs, sizeof(int));
   if (bench_output == NULL)
      bots_error(0, "Out of memory allocating bench_output\n");
}
/* Kernel entry point for the benchmarked run: delegates to pairalign()
 * (defined above this fragment; presumably fills bench_output, which is
 * what align_end()/align_verify() read — confirm against pairalign). */
void align()
{
   pairalign();
}
/* Allocate the matrices used by the sequential reference run.
 * Fixes: the original zeroed only seq_output with an O(n^2) loop and left
 * bench_output allocated but UNinitialised (align_end/align_verify read it),
 * and neither malloc was checked.  calloc zero-initialises both. */
void align_seq_init ()
{
   seq_output   = (int *) calloc((size_t)nseqs * (size_t)nseqs, sizeof(int));
   bench_output = (int *) calloc((size_t)nseqs * (size_t)nseqs, sizeof(int));
   if (seq_output == NULL || bench_output == NULL)
      bots_error(0, "Out of memory allocating output matrices\n");
}
/* Entry point for the sequential reference run: delegates to
 * pairalign_seq(), which fills seq_output. */
void align_seq()
{
   pairalign_seq();
}
/* Report every non-zero pairwise score produced by the benchmark run. */
void align_end ()
{
   int row, col;
   for (row = 0; row < nseqs; row++) {
      for (col = 0; col < nseqs; col++) {
         int score = bench_output[row*nseqs+col];
         if (score != 0)
            bots_debug("Benchmark sequences (%d:%d) Aligned. Score: %d\n",
               row+1, col+1, score);
      }
   }
}
/* Compare the parallel scores against the sequential reference.
 * Returns BOTS_RESULT_SUCCESSFUL only when every entry matches; every
 * mismatch is reported individually. */
int align_verify ()
{
   int row, col;
   int result = BOTS_RESULT_SUCCESSFUL;
   for (row = 0; row < nseqs; row++) {
      for (col = 0; col < nseqs; col++) {
         int bench = bench_output[row*nseqs+col];
         int seq   = seq_output[row*nseqs+col];
         if (bench != seq) {
            bots_message("Error: Optimized prot. (%3d:%3d)=%5d Sequential prot. (%3d:%3d)=%5d\n",
               row+1, col+1, bench,
               row+1, col+1, seq);
            result = BOTS_RESULT_UNSUCCESSFUL;
         }
      }
   }
   return result;
}
|
SplineR2RAdoptor.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_EINSPLINE_R2RSOA_ADOPTOR_H
#define QMCPLUSPLUS_EINSPLINE_R2RSOA_ADOPTOR_H
#include <OhmmsSoA/Container.h>
#include <spline2/MultiBspline.hpp>
#include <spline2/MultiBsplineEval.hpp>
#include "QMCWaveFunctions/BsplineFactory/SplineAdoptorBase.h"
namespace qmcplusplus
{
/** adoptor class to match ST real spline with TT real SPOs
* @tparam ST precision of spline
* @tparam TT precision of SPOs
* @tparam D dimension
*
* Requires temporage storage and multiplication of the sign of the real part of the phase
* Internal storage ST type arrays are aligned and padded.
*/
template<typename ST, typename TT>
struct SplineR2RSoA: public SplineAdoptorBase<ST,3>
{
  static const int D=3;
  /// true when HalfG is (0,0,0), i.e. the Gamma-point twist
  bool IsGamma;
  using BaseType=SplineAdoptorBase<ST,3>;
  using SplineType=typename bspline_traits<ST,3>::SplineType;
  using BCType=typename bspline_traits<ST,3>::BCType;
  using PointType=typename BaseType::PointType;
  using SingleSplineType=typename BaseType::SingleSplineType;
  using vContainer_type=Vector<ST,aligned_allocator<ST> >;
  using gContainer_type=VectorSoaContainer<ST,3>;
  using hContainer_type=VectorSoaContainer<ST,6>;
  using BaseType::first_spo;
  using BaseType::last_spo;
  using SplineAdoptorBase<ST,D>::HalfG;
  using BaseType::GGt;
  using BaseType::PrimLattice;
  using BaseType::kPoints;
  using BaseType::offset;
  ///number of points of the original grid
  int BaseN[3];
  ///offset of the original grid, always 0
  int BaseOffset[3];
  ///multi bspline set
  MultiBspline<ST>* SplineInst;
  ///expose the pointer to reuse the reader and only assigned with create_spline
  ///also used as identifier of shallow copy
  SplineType* MultiSpline;
  ///scratch storage: spline values, one entry per (padded) orbital
  vContainer_type myV;
  ///scratch storage: Laplacians
  vContainer_type myL;
  ///scratch storage: gradients (SoA, 3 components)
  gContainer_type myG;
  ///scratch storage: Hessian upper triangle (SoA, 6 components)
  hContainer_type myH;
  /// default constructor: real-valued, SoA-ready adoptor; owns nothing yet
  SplineR2RSoA(): BaseType(), SplineInst(nullptr), MultiSpline(nullptr)
  {
    this->is_complex=false;
    this->is_soa_ready=true;
    this->AdoptorName="SplineR2RSoAAdoptor";
    this->KeyWord="SplineR2RSoA";
  }
  /** shallow copy: shares a.SplineInst but leaves MultiSpline null, so the
   * copy is not the owner (see the destructor) and only resizes its own
   * scratch storage.
   */
  SplineR2RSoA(const SplineR2RSoA& a):
    SplineAdoptorBase<ST,3>(a),SplineInst(a.SplineInst),MultiSpline(nullptr)
  {
    const size_t n=a.myV.size();
    myV.resize(n); myG.resize(n); myL.resize(n); myH.resize(n);
  }
  /// only the owning instance (MultiSpline non-null) deletes the shared table
  ~SplineR2RSoA()
  {
    if(MultiSpline != nullptr) delete SplineInst;
  }
  /** size the scratch arrays for n orbitals (padded to the SIMD alignment of
   * ST) and cache whether this twist is the Gamma point.
   * @param n number of orbitals
   * @param nvals unused here (kept for the adoptor interface)
   */
  inline void resizeStorage(size_t n, size_t nvals)
  {
    BaseType::init_base(n);
    const size_t npad=getAlignedSize<ST>(n);
    myV.resize(npad);
    myG.resize(npad);
    myL.resize(npad);
    myH.resize(npad);
    IsGamma=( (HalfG[0]==0) && (HalfG[1]==0) && (HalfG[2]==0));
  }
  /// broadcast the spline coefficient table to all ranks in chunks
  void bcast_tables(Communicate* comm)
  {
    chunked_bcast(comm, MultiSpline);
  }
  /** gather the band groups of the coefficient table from all ranks;
   * bands are fair-divided over the ranks and gathered along z_stride.
   */
  void gather_tables(Communicate* comm)
  {
    if(comm->size()==1) return;
    const int Nbands = kPoints.size();
    const int Nbandgroups = comm->size();
    offset.resize(Nbandgroups+1,0);
    FairDivideLow(Nbands,Nbandgroups,offset);
    gatherv(comm, MultiSpline, MultiSpline->z_stride, offset);
  }
  /** allocate the multi-bspline table for myV.size() orbitals on the given
   * grid/boundary conditions; also caches GGt = G^T G for Laplacian
   * contraction and records the original grid extents.
   */
  template<typename GT, typename BCT>
  void create_spline(GT& xyz_g, BCT& xyz_bc)
  {
    GGt=dot(transpose(PrimLattice.G),PrimLattice.G);
    SplineInst=new MultiBspline<ST>();
    SplineInst->create(xyz_g,xyz_bc,myV.size());
    MultiSpline=SplineInst->spline_m;
    for(size_t i=0; i<D; ++i)
    {
      BaseOffset[i]=0;
      BaseN[i]=xyz_g[i].num+3;  // +3: B-spline padding beyond the grid points
    }
    qmc_common.memory_allocated += SplineInst->sizeInByte();
  }
  /// zero all spline coefficients
  inline void flush_zero()
  {
    SplineInst->flush_zero();
  }
  /** copy one single-spline orbital into slot ispline of the multi-spline
   * table; only the real part is used (spline_i, twist and level are unused
   * for real splines).
   */
  inline void set_spline(SingleSplineType* spline_r, SingleSplineType* spline_i, int twist, int ispline, int level)
  {
    SplineInst->copy_spline(spline_r, ispline, BaseOffset, BaseN);
  }
  /// set orbital ispline directly from raw real-valued data (psi_i unused)
  void set_spline(ST* restrict psi_r, ST* restrict psi_i, int twist, int ispline, int level)
  {
    Vector<ST> v_r(psi_r,0);
    SplineInst->set(ispline, v_r);
  }
  /// intentionally a no-op for this adoptor (interface hook only)
  inline void set_spline_domain(SingleSplineType* spline_r, SingleSplineType* spline_i,
                                int twist, int ispline, const int* offset_l, const int* mesh_l)
  {
  }
  /// read the whole coefficient table from HDF5 ("spline_<MyIndex>")
  bool read_splines(hdf_archive& h5f)
  {
    std::ostringstream o;
    o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex;
    einspline_engine<SplineType> bigtable(SplineInst->spline_m);
    return h5f.read(bigtable,o.str().c_str());//"spline_0");
  }
  /// write the whole coefficient table to HDF5 ("spline_<MyIndex>")
  bool write_splines(hdf_archive& h5f)
  {
    std::ostringstream o;
    o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex;
    einspline_engine<SplineType> bigtable(SplineInst->spline_m);
    return h5f.write(bigtable,o.str().c_str());//"spline_0");
  }
  /** convert position in PrimLattice unit and return sign */
  // Folds r into the unit cell (ru in [0,1) per axis, with a tolerance for
  // tiny negative values) and accumulates HalfG[i]*floor(ru[i]); the parity
  // of the result supplies the antiperiodic sign used by assign_*.
  inline int convertPos(const PointType& r, PointType& ru)
  {
    ru=PrimLattice.toUnit(r);
    int bc_sign=0;
    for(int i=0; i<D; i++)
      if( -std::numeric_limits<ST>::epsilon() < ru[i] && ru[i] < 0 )
        ru[i] = ST(0.0);
      else
      {
        ST img = std::floor(ru[i]);
        ru[i] -= img;
        bc_sign += HalfG[i] * (int)img;
      }
    return bc_sign;
  }
  /** copy values myV[first..last) into psi with the boundary sign applied.
   * NOTE(review): j (size_t) is compared against last (int) and
   * last>kPoints.size() mixes signed/unsigned — benign for the ranges used
   * here, but worth confirming.
   */
  template<typename VV>
  inline void assign_v(int bc_sign, const vContainer_type& myV, VV& psi, int first = 0, int last = -1) const
  {
    // protect last
    last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last);
    const ST signed_one = (bc_sign &1)? -1:1;
    #pragma omp simd
    for(size_t j=first; j<last; ++j)
      psi[first_spo+j]=signed_one*myV[j];
  }
  /** evaluate orbital values at particle iat's active position; the orbital
   * range is split over the OpenMP team, each thread evaluating and
   * assigning its own aligned slice.
   */
  template<typename VV>
  inline void evaluate_v(const ParticleSet& P, const int iat, VV& psi)
  {
    const PointType& r=P.activeR(iat);
    PointType ru;
    int bc_sign=convertPos(r,ru);
    #pragma omp parallel
    {
      int first, last;
      FairDivideAligned(myV.size(), getAlignment<ST>(),
                        omp_get_num_threads(),
                        omp_get_thread_num(),
                        first, last);
      spline2::evaluate3d(SplineInst->spline_m,ru,myV,first,last);
      assign_v(bc_sign,myV,psi,first,last);
    }
  }
  /** evaluate values for all virtual-particle positions, one row of psiM per
   * position; parallelised over the orbital range (threads share myV, each
   * writing only its own slice per position).
   */
  template<typename VM, typename VAV>
  inline void evaluateValues(const VirtualParticleSet& VP, VM& psiM, VAV& SPOMem)
  {
    #pragma omp parallel
    {
      int first, last;
      FairDivideAligned(myV.size(), getAlignment<ST>(),
                        omp_get_num_threads(),
                        omp_get_thread_num(),
                        first, last);
      const size_t m=psiM.cols();
      for(int iat=0; iat<VP.getTotalNum(); ++iat)
      {
        const PointType& r=VP.activeR(iat);
        PointType ru;
        int bc_sign=convertPos(r,ru);
        Vector<TT> psi(psiM[iat],m);
        spline2::evaluate3d(SplineInst->spline_m,ru,myV,first,last);
        assign_v(bc_sign,myV,psi,first,last);
      }
    }
  }
  /// no extra per-particle memory is needed by this adoptor
  inline size_t estimateMemory(const int nP) { return 0; }
  /** assign value/gradient/Laplacian from myV/myG/myH for [first,last):
   * gradients are rotated from lattice to Cartesian with PrimLattice.G and
   * the Laplacian is contracted with the symmetrised GGt (SymTrace).
   */
  template<typename VV, typename GV>
  inline void assign_vgl(int bc_sign, VV& psi, GV& dpsi, VV& d2psi, int first = 0, int last = -1) const
  {
    // protect last
    last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last);
    const ST signed_one = (bc_sign &1)? -1:1;
    const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2),
             g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5),
             g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8);
    const ST symGG[6]={GGt[0],GGt[1]+GGt[3],GGt[2]+GGt[6],GGt[4],GGt[5]+GGt[7],GGt[8]};
    const ST* restrict g0=myG.data(0);
    const ST* restrict g1=myG.data(1);
    const ST* restrict g2=myG.data(2);
    const ST* restrict h00=myH.data(0);
    const ST* restrict h01=myH.data(1);
    const ST* restrict h02=myH.data(2);
    const ST* restrict h11=myH.data(3);
    const ST* restrict h12=myH.data(4);
    const ST* restrict h22=myH.data(5);
    #pragma omp simd
    for(size_t j=first; j<last; ++j)
    {
      const size_t psiIndex=first_spo+j;
      psi[psiIndex]=signed_one*myV[j];
      dpsi[psiIndex][0]=signed_one*(g00*g0[j]+g01*g1[j]+g02*g2[j]);
      dpsi[psiIndex][1]=signed_one*(g10*g0[j]+g11*g1[j]+g12*g2[j]);
      dpsi[psiIndex][2]=signed_one*(g20*g0[j]+g21*g1[j]+g22*g2[j]);
      d2psi[psiIndex]=signed_one*SymTrace(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],symGG);
    }
  }
  /** assign_vgl_from_l can be used when myL is precomputed and myV,myG,myL in cartesian
   */
  template<typename VV, typename GV>
  inline void assign_vgl_from_l(int bc_sign, VV& psi, GV& dpsi, VV& d2psi)
  {
    const ST signed_one = (bc_sign &1)? -1:1;
    const ST* restrict g0=myG.data(0);
    const ST* restrict g1=myG.data(1);
    const ST* restrict g2=myG.data(2);
    #pragma omp simd
    for(int psiIndex=first_spo; psiIndex<last_spo; ++psiIndex)
    {
      const size_t j=psiIndex-first_spo;
      psi[psiIndex]=signed_one*myV[j];
      dpsi[psiIndex][0]=signed_one*g0[j];
      dpsi[psiIndex][1]=signed_one*g1[j];
      dpsi[psiIndex][2]=signed_one*g2[j];
      d2psi[psiIndex]=signed_one*myL[j];
    }
  }
  /** evaluate value+gradient+Laplacian at particle iat's active position;
   * the spline VGH evaluation and assignment are split over the OpenMP team.
   */
  template<typename VV, typename GV>
  inline void evaluate_vgl(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, VV& d2psi)
  {
    const PointType& r=P.activeR(iat);
    PointType ru;
    int bc_sign=convertPos(r,ru);
    #pragma omp parallel
    {
      int first, last;
      FairDivideAligned(myV.size(), getAlignment<ST>(),
                        omp_get_num_threads(),
                        omp_get_thread_num(),
                        first, last);
      spline2::evaluate3d_vgh(SplineInst->spline_m,ru,myV,myG,myH,first,last);
      assign_vgl(bc_sign,psi,dpsi,d2psi,first,last);
    }
  }
  /** assign value/gradient/Hessian for [first,last): gradients and Hessians
   * are transformed from lattice to Cartesian coordinates (H_cart =
   * G^T H G, computed element-wise via v_m_v) with the boundary sign.
   */
  template<typename VV, typename GV, typename GGV>
  void assign_vgh(int bc_sign, VV& psi, GV& dpsi, GGV& grad_grad_psi, int first = 0, int last = -1) const
  {
    // protect last
    last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last);
    const ST signed_one = (bc_sign &1)? -1:1;
    const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2),
             g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5),
             g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8);
    const ST* restrict g0=myG.data(0);
    const ST* restrict g1=myG.data(1);
    const ST* restrict g2=myG.data(2);
    const ST* restrict h00=myH.data(0);
    const ST* restrict h01=myH.data(1);
    const ST* restrict h02=myH.data(2);
    const ST* restrict h11=myH.data(3);
    const ST* restrict h12=myH.data(4);
    const ST* restrict h22=myH.data(5);
    #pragma omp simd
    for(size_t j=first; j<last; ++j)
    {
      //dot(PrimLattice.G,myG[j])
      const ST dX_r = g00*g0[j]+g01*g1[j]+g02*g2[j];
      const ST dY_r = g10*g0[j]+g11*g1[j]+g12*g2[j];
      const ST dZ_r = g20*g0[j]+g21*g1[j]+g22*g2[j];
      const size_t psiIndex=j+first_spo;
      psi[psiIndex] =signed_one*myV[j];
      dpsi[psiIndex][0]=signed_one*dX_r;
      dpsi[psiIndex][1]=signed_one*dY_r;
      dpsi[psiIndex][2]=signed_one*dZ_r;
      const ST h_xx_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g00,g01,g02,g00,g01,g02);
      const ST h_xy_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g00,g01,g02,g10,g11,g12);
      const ST h_xz_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g00,g01,g02,g20,g21,g22);
      const ST h_yx_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g10,g11,g12,g00,g01,g02);
      const ST h_yy_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g10,g11,g12,g10,g11,g12);
      const ST h_yz_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g10,g11,g12,g20,g21,g22);
      const ST h_zx_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g20,g21,g22,g00,g01,g02);
      const ST h_zy_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g20,g21,g22,g10,g11,g12);
      const ST h_zz_r=v_m_v(h00[j],h01[j],h02[j],h11[j],h12[j],h22[j],g20,g21,g22,g20,g21,g22);
      grad_grad_psi[psiIndex][0]=signed_one*h_xx_r;
      grad_grad_psi[psiIndex][1]=signed_one*h_xy_r;
      grad_grad_psi[psiIndex][2]=signed_one*h_xz_r;
      grad_grad_psi[psiIndex][3]=signed_one*h_yx_r;
      grad_grad_psi[psiIndex][4]=signed_one*h_yy_r;
      grad_grad_psi[psiIndex][5]=signed_one*h_yz_r;
      grad_grad_psi[psiIndex][6]=signed_one*h_zx_r;
      grad_grad_psi[psiIndex][7]=signed_one*h_zy_r;
      grad_grad_psi[psiIndex][8]=signed_one*h_zz_r;
    }
  }
  /** evaluate value+gradient+Hessian at particle iat's active position;
   * split over the OpenMP team like evaluate_vgl.
   */
  template<typename VV, typename GV, typename GGV>
  void evaluate_vgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi)
  {
    const PointType& r=P.activeR(iat);
    PointType ru;
    int bc_sign=convertPos(r,ru);
    #pragma omp parallel
    {
      int first, last;
      FairDivideAligned(myV.size(), getAlignment<ST>(),
                        omp_get_num_threads(),
                        omp_get_thread_num(),
                        first, last);
      spline2::evaluate3d_vgh(SplineInst->spline_m,ru,myV,myG,myH,first,last);
      assign_vgh(bc_sign,psi,dpsi,grad_grad_psi,first,last);
    }
  }
};
}
#endif
|
raytracer.h | #pragma once
#include "resource.h"

#include <linalg.h>
#include <omp.h>

#include <functional>
#include <memory>
#include <random>
#include <time.h>
#include <utility>
#include <vector>
using namespace linalg::aliases;
namespace cg::renderer
{
struct ray
{
ray(float3 position, float3 direction) : position(position)
{
this->direction = normalize(direction);
}
float3 position;
float3 direction;
};
// Result of tracing a ray.
struct payload
{
	float t;          // ray parameter of the hit; -1 marks "no hit" (see intersection_shader)
	float3 bary;      // barycentric weights (1-u-v, u, v) of the hit point
	cg::color color;  // shaded color produced by the hit/miss shaders
};
// A triangle with precomputed edge vectors, per-vertex normals and a flat
// (per-face) material taken from the first vertex (see the constructor).
template<typename VB>
struct triangle
{
	triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c);
	float3 a;
	float3 b;
	float3 c;
	float3 ba;       // edge b - a (cached for intersection)
	float3 ca;       // edge c - a (cached for intersection)
	float3 na;       // vertex normals
	float3 nb;
	float3 nc;
	float3 ambient;  // material terms, sampled from vertex_a only
	float3 diffuse;
	float3 emissive;
};
// Build a triangle from three vertices: positions, cached edges, per-vertex
// normals, and material coefficients taken from vertex_a alone (the face is
// assumed to carry a uniform material — confirm against the asset format).
template<typename VB>
inline triangle<VB>::triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c)
{
	a = float3{ vertex_a.x, vertex_a.y, vertex_a.z };
	b = float3{ vertex_b.x, vertex_b.y, vertex_b.z };
	c = float3{ vertex_c.x, vertex_c.y, vertex_c.z };
	ba = b - a;
	ca = c - a;
	na = float3{ vertex_a.nx, vertex_a.ny, vertex_a.nz };
	nb = float3{ vertex_b.nx, vertex_b.ny, vertex_b.nz };
	nc = float3{ vertex_c.nx, vertex_c.ny, vertex_c.nz };
	ambient = {
		vertex_a.ambient_r,
		vertex_a.ambient_g,
		vertex_a.ambient_b,
	};
	diffuse = {
		vertex_a.diffuse_r,
		vertex_a.diffuse_g,
		vertex_a.diffuse_b,
	};
	emissive = {
		vertex_a.emissive_r,
		vertex_a.emissive_g,
		vertex_a.emissive_b,
	};
}
// Axis-aligned bounding box used as a one-level acceleration structure:
// it stores one shape's triangles together with their common bounds.
template<typename VB>
class aabb
{
public:
	void add_triangle(const triangle<VB> triangle);
	// NOTE(review): the name keeps the historical "traingles" typo — it is
	// part of the public interface and spelled this way at every call site.
	const std::vector<triangle<VB>>& get_traingles() const;
	bool aabb_test(const ray& ray) const;
protected:
	std::vector<triangle<VB>> triangles;
	float3 aabb_min;  // valid only after the first add_triangle
	float3 aabb_max;
};
// A point light (position + RGB intensity).  Not referenced by the code in
// this header; presumably consumed by the shader lambdas set by the caller.
struct light
{
	float3 position;
	float3 color;
};
// CPU ray tracer over per-shape vertex buffers.
// @tparam VB vertex type (must expose x/y/z, nx/ny/nz and material fields
//            used by triangle<VB>)
// @tparam RT render-target pixel type (must provide from_color() and
//            to_float3(), used in ray_generation)
template<typename VB, typename RT>
class raytracer
{
public:
	raytracer(){};
	~raytracer(){};
	void set_render_target(std::shared_ptr<resource<RT>> in_render_target);
	void clear_render_target(const RT& in_clear_value);
	void set_viewport(size_t in_width, size_t in_height);
	void set_per_shape_vertex_buffer(
			std::vector<std::shared_ptr<cg::resource<VB>>> in_per_shape_vertex_buffer);
	// Builds one AABB per shape from its vertex buffer (3 vertices/triangle).
	void build_acceleration_structure();
	std::vector<aabb<VB>> acceleration_structures;
	// Renders the full viewport; frame_weight blends with the previous frame.
	void ray_generation(float3 position, float3 direction, float3 right, float3 up, float frame_weight = 1.f);
	payload trace_ray(const ray& ray, size_t depth, float max_t = 1000.f, float min_t = 0.001f) const;
	payload intersection_shader(const triangle<VB>& triangle, const ray& ray) const;
	// User-installed shading callbacks; trace_ray calls miss_shader
	// unconditionally on a miss, so it must be set before rendering.
	std::function<payload(const ray& ray)> miss_shader = nullptr;
	std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle, size_t depth)> closest_hit_shader =
			nullptr;
	std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> any_hit_shader =
			nullptr;
	// Gaussian jitter helper used for anti-aliasing (see ray_generation).
	float get_random(const int thread_num, float range = 0.1f) const;
protected:
	std::shared_ptr<cg::resource<RT>> render_target;
	std::vector<std::shared_ptr<cg::resource<VB>>> per_shape_vertex_buffer;
	size_t width = 1920;
	size_t height = 1080;
};
// Install the render target the tracer writes pixels into.
template<typename VB, typename RT>
inline void raytracer<VB, RT>::set_render_target(std::shared_ptr<resource<RT>> in_render_target)
{
	render_target = in_render_target;
}
// Fill every element of the render target with the clear value.
template<typename VB, typename RT>
inline void raytracer<VB, RT>::clear_render_target(const RT& in_clear_value)
{
	for (size_t idx = 0; idx < render_target->get_number_of_elements(); ++idx)
		render_target->item(idx) = in_clear_value;
}
// Take ownership of the per-shape vertex buffers.
// Improvement: move from the by-value sink parameter instead of copying the
// whole vector — each shared_ptr copy is an atomic refcount increment.
template<typename VB, typename RT>
inline void raytracer<VB, RT>::set_per_shape_vertex_buffer(
		std::vector<std::shared_ptr<cg::resource<VB>>> in_per_shape_vertex_buffer)
{
	per_shape_vertex_buffer = std::move(in_per_shape_vertex_buffer);
}
// Build one AABB per shape, covering all of the shape's triangles
// (3 consecutive vertices form one triangle).
// BUG FIX: the original passed vertex_buffer->item(vertex_id++) three times
// in a single argument list; C++ leaves the evaluation order of function
// arguments unspecified, so the triangle's vertices a/b/c could be permuted
// depending on the compiler.  The vertices are now fetched with explicit
// indices.  The loop condition also guards against a trailing partial
// triangle (buffer size not a multiple of 3).
template<typename VB, typename RT>
inline void raytracer<VB, RT>::build_acceleration_structure()
{
	for (auto& vertex_buffer : per_shape_vertex_buffer)
	{
		aabb<VB> box;
		size_t vertex_id = 0;
		while (vertex_id + 2 < vertex_buffer->get_number_of_elements() + 1 - 1 + 1)
		{
			const VB& va = vertex_buffer->item(vertex_id);
			const VB& vb = vertex_buffer->item(vertex_id + 1);
			const VB& vc = vertex_buffer->item(vertex_id + 2);
			vertex_id += 3;
			box.add_triangle(triangle<VB>(va, vb, vc));
		}
		acceleration_structures.push_back(box);
	}
}
// Set the output resolution used by ray_generation.
template<typename VB, typename RT>
inline void raytracer<VB, RT>::set_viewport(size_t in_width, size_t in_height)
{
	width = in_width;
	height = in_height;
}
// Render the full viewport: for each pixel, build a jittered camera ray,
// trace it, and blend the result with the accumulated frame (TAA-style).
// Rows of each column are traced in parallel with OpenMP; the per-column
// printf is a crude console progress bar (backspaces rewrite the line).
// NOTE(review): `int x`/`int y` are compared against size_t width/height
// (signed/unsigned mix), and get_random's seed mixes clock() per call —
// presumably to decorrelate threads; confirm intended.
template<typename VB, typename RT>
inline void raytracer<VB, RT>::ray_generation(
		float3 position, float3 direction, float3 right, float3 up, float frame_weight)
{
	for (int x = 0; x < width; x++)
	{
		if (x % 10 == 0)
			printf("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b%.0f%% If 0%% cpu press any key", x * 100.f / width);
#pragma omp parallel for
		for (int y = 0; y < height; y++)
		{
			float u;
			float v;
			// sub-pixel Gaussian jitter for temporal anti-aliasing
			float x_jitter = get_random(omp_get_thread_num() + clock());
			float y_jitter = get_random(omp_get_thread_num() + clock());
			//[0, width-1] -> [-1,1]
			u = 2.f * (x + x_jitter) / static_cast<float>(width - 1) - 1.f;
			u *= static_cast<float>(width) / static_cast<float>(height);  // aspect ratio
			v = 2.f * (y + y_jitter) / static_cast<float>(height - 1) - 1.f;
			// half-pixel offsets, used only by the disabled SSAA path below
			float u_delta = 0.5f / static_cast<float>(width - 1);
			u_delta *= static_cast<float>(width) / static_cast<float>(height);
			float v_delta = 0.5f / static_cast<float>(height - 1);
			// No anti-aliasing (disabled single-sample path)
			/*float3 ray_direction = direction + u*right - v*up;
			ray ray(position, ray_direction);
			payload payload = trace_ray(ray, 1);
			render_target->item(x, y) = RT::from_color(payload.color);*/
			// TAA: average the new sample with the accumulated buffer
			float3 ray_direction =
					direction + u * right - v * up;
			ray ray(position, ray_direction);
			payload payload = trace_ray(ray, 5);
			cg::color accumulated_color =
					cg::color::from_float3(render_target->item(x, y).to_float3());
			cg::color result{
				(accumulated_color.r + frame_weight * payload.color.r) / 2.f,
				(accumulated_color.g + frame_weight * payload.color.g) / 2.f,
				(accumulated_color.b + frame_weight * payload.color.b) / 2.f,
			};
			render_target->item(x, y) = RT::from_color(result);
			// SSAA (disabled 4-sample path)
			/*ray ray0(position, ray_direction);
			payload payload_0 = trace_ray(ray0, 1);
			ray ray1(position, ray_direction + u_delta*right);
			payload payload_1 = trace_ray(ray1, 1);
			ray ray2(position, ray_direction - v_delta*up);
			payload payload_2 = trace_ray(ray2, 1);
			ray ray3(position, ray_direction + u_delta * right - v_delta * up);
			payload payload_3 = trace_ray(ray3, 1);
			float r = (payload_0.color.r + payload_1.color.r +
								 payload_2.color.r + payload_3.color.r) / 4.f;
			float g = (payload_0.color.g + payload_1.color.g +
								 payload_2.color.g + payload_3.color.g) /
								4.f;
			float b = (payload_0.color.b + payload_1.color.b +
								 payload_2.color.b + payload_3.color.b) /
								4.f;
			cg::color accumulated_color{r, g, b};
			render_target->item(x, y) = RT::from_color(accumulated_color);*/
		}
	}
}
// Trace one ray against every shape's AABB and its triangles, returning the
// shaded payload of the closest hit in (min_t, max_t), the any-hit result
// when an any-hit shader is installed, or the miss shader's payload.
// BUG FIX: the original fell off the end of the function (undefined
// behavior) when a hit was found but closest_hit_shader was null; that case
// now returns the raw hit payload.
template<typename VB, typename RT>
inline payload
raytracer<VB, RT>::trace_ray(const ray& ray, size_t depth, float max_t, float min_t) const
{
	// Recursion budget exhausted: treat as a miss.
	if (depth == 0)
	{
		return miss_shader(ray);
	}
	depth--;
	payload closest_hit_payload = {};
	closest_hit_payload.t = max_t;
	const triangle<VB>* closest_triangle = nullptr;
	for (auto& aabb : acceleration_structures)
	{
		if (aabb.aabb_test(ray)) // cheap reject: skip shapes whose box the ray misses
		{
			for (auto& triangle : aabb.get_traingles())
			{
				payload payload = intersection_shader(triangle, ray);
				if (payload.t > min_t && payload.t < closest_hit_payload.t)
				{
					closest_hit_payload = payload;
					closest_triangle = &triangle;
					// Any-hit shaders short-circuit on the first acceptable hit
					// (e.g. shadow rays) — not necessarily the closest one.
					if (any_hit_shader)
						return any_hit_shader(ray, payload, triangle);
				}
			}
		}
	}
	if (closest_hit_payload.t < max_t)
	{
		if (closest_hit_shader)
			return closest_hit_shader(ray, closest_hit_payload, *closest_triangle, depth);
		return closest_hit_payload; // hit, but no closest-hit shader installed
	}
	return miss_shader(ray);
}
// Moller-Trumbore ray/triangle intersection.
// Returns payload.t = -1 when there is no hit; otherwise t is the ray
// parameter of the hit and bary the barycentric weights (1-u-v, u, v).
// No backface culling: both signs of the determinant are accepted.
template<typename VB, typename RT>
inline payload
raytracer<VB, RT>::intersection_shader(const triangle<VB>& triangle, const ray& ray) const
{
	payload payload{};
	payload.t = -1.f;
	float3 pvec = cross(ray.direction, triangle.ca);
	float det = dot(triangle.ba, pvec);
	// near-zero determinant: ray is (almost) parallel to the triangle plane
	if (det > -1e-8 && det < 1e-8)
	{
		return payload;
	}
	float inv_det = 1.f / det;
	float3 tvec = ray.position - triangle.a;
	float u = dot(tvec, pvec) * inv_det;
	if (u < 0.f || u > 1.f)
		return payload;
	float3 qvec = cross(tvec, triangle.ba);
	float v = dot(ray.direction, qvec) * inv_det;
	if (v < 0.f || v > 1.f || u + v > 1.f)
		return payload;
	// t may still be negative (hit behind the origin); trace_ray filters
	// hits through its min_t threshold.
	payload.t = dot(triangle.ca, qvec) * inv_det;
	payload.bary = float3{ 1.f - u - v, u, v };
	return payload;
}
// Return a Gaussian-distributed jitter value (mean 0, stddev = range).
// BUG FIX: the engine and distribution were function-local `static`, i.e.
// shared by every thread — a data race when called from the OpenMP loop in
// ray_generation(), and std::default_random_engine is not thread-safe.
// `thread_local` gives each thread its own generator, seeded on first use
// with that thread's thread_num argument (as before, the seed and range of
// later calls are ignored once the objects exist).
template<typename VB, typename RT>
inline float raytracer<VB, RT>::get_random(const int thread_num, const float range) const
{
	thread_local std::default_random_engine generator(thread_num);
	thread_local std::normal_distribution<float> distribution(0.f, range);
	return distribution(generator);
}
// Add a triangle to the box and grow the bounds to enclose it.
// BUG FIX: the original wrote `aabb_min = aabb_min = triangle.a;` for the
// first triangle, leaving aabb_max uninitialised — the subsequent max()
// calls then mixed garbage into the bounds.  Both extremes are now seeded
// from the first vertex.
template<typename VB>
inline void aabb<VB>::add_triangle(const triangle<VB> triangle)
{
	if (triangles.empty())
	{
		aabb_min = triangle.a;
		aabb_max = triangle.a;
	}
	triangles.push_back(triangle);
	aabb_max = max(triangle.a, aabb_max);
	aabb_max = max(triangle.b, aabb_max);
	aabb_max = max(triangle.c, aabb_max);
	aabb_min = min(triangle.a, aabb_min);
	aabb_min = min(triangle.b, aabb_min);
	aabb_min = min(triangle.c, aabb_min);
}
// Accessor for the stored triangles.  The "traingles" typo is part of the
// public interface (used by trace_ray) and is therefore kept.
template<typename VB>
inline const std::vector<triangle<VB>>& aabb<VB>::get_traingles() const
{
	return triangles;
}
// Slab test: true when the ray's (infinite) line crosses the box.
// NOTE(review): there is no check that the overlap interval reaches t >= 0,
// so boxes entirely behind the ray origin also pass — confirm intended
// (trace_ray still rejects such hits via min_t).  A zero direction
// component produces +/-inf slab distances, which IEEE min/max handle, but
// an origin lying exactly on a slab can produce NaN (0 * inf).
template<typename VB>
inline bool aabb<VB>::aabb_test(const ray& ray) const
{
	float3 invRaydir = float3(1.f) / ray.direction;
	float3 t0 = (aabb_max - ray.position) * invRaydir;
	float3 t1 = (aabb_min - ray.position) * invRaydir;
	float3 tmin = min(t0, t1);
	float3 tmax = max(t0, t1);
	return maxelem(tmin) <= minelem(tmax);
}
} // namespace cg::renderer |
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two struct timeval values.
 * Y is used as scratch space: it is modified to carry the borrow/carry.
 * Returns 1 when the difference is negative, 0 otherwise; on return
 * result->tv_usec is always non-negative. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry surplus microseconds the other way. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* The microsecond field of the difference is now non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 16;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
is.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - IS
This benchmark is an OpenMP C version of the NPB IS code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* #define USE_BUCKETS */
/* buckets are not used in the OpenMP C version */
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#endif
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
typedef int INT_TYPE;   /* key type; widen (e.g. to long) for larger classes */
/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global;  /* used by full_verify to get */
                                /* copies of rank info        */
int passed_verification;        /* running count of successful partial+full checks */
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above    */
/************************************/
INT_TYPE key_array[SIZE_OF_BUFFERS],         /* keys to rank; two entries perturbed per iteration by rank() */
         key_buff1[SIZE_OF_BUFFERS],         /* key population, then ranks after prefix sum */
         key_buff2[SIZE_OF_BUFFERS],         /* working copy of key_array */
         partial_verify_vals[TEST_ARRAY_SIZE];  /* sampled keys for the partial verify test */
#ifdef USE_BUCKETS
INT_TYPE bucket_size[NUM_BUCKETS],
         bucket_ptrs[NUM_BUCKETS];
#endif
/**********************/
/* Partial verif info */
/**********************/
/* Only the pair matching the compiled CLASS is copied into             */
/* test_index_array/test_rank_array by main(); the rest are reference   */
/* data for the other problem sizes.                                    */
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
         test_rank_array[TEST_ARRAY_SIZE],
         S_test_index_array[TEST_ARRAY_SIZE] =
                             {48427,17148,23627,62548,4431},
         S_test_rank_array[TEST_ARRAY_SIZE] =
                             {0,18,346,64917,65463},
         W_test_index_array[TEST_ARRAY_SIZE] =
                             {357773,934767,875723,898999,404505},
         W_test_rank_array[TEST_ARRAY_SIZE] =
                             {1249,11698,1039987,1043896,1048018},
         A_test_index_array[TEST_ARRAY_SIZE] =
                             {2112377,662041,5336171,3642833,4250760},
         A_test_rank_array[TEST_ARRAY_SIZE] =
                             {104,17523,123928,8288932,8388264},
         B_test_index_array[TEST_ARRAY_SIZE] =
                             {41869,812306,5102857,18232239,26860214},
         B_test_rank_array[TEST_ARRAY_SIZE] =
                             {33422937,10244,59149,33135281,99},
         C_test_index_array[TEST_ARRAY_SIZE] =
                             {44172927,72999161,74326391,129606274,21736814},
         C_test_rank_array[TEST_ARRAY_SIZE] =
                             {61147,882988,266290,133997595,133525895};
/***********************/
/* function prototypes */
/***********************/
double randlc( double *X, double *A );
extern double timer_read(int);
void full_verify( void );
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
/*
 * Portable linear congruential generator (D. H. Bailey, 1990):
 *
 *     x_{k+1} = a * x_k  (mod 2^46)
 *
 * *X is the current seed (odd integer in (1, 2^46)), *A the multiplier.
 * *X is updated in place and the normalized value 2^-46 * x_{k+1} in
 * (0, 1) is returned.  The 46-bit product is formed exactly in double
 * precision by splitting both operands into 23-bit halves, so results
 * are reproducible on any machine with >= 48 mantissa bits.
 *
 * Modernized from the original K&R-style definition to an ANSI
 * definition (required by C++/modern C; matches the prototype above).
 */
double randlc( double *X, double *A )
{
    static int    KS = 0;                   /* one-time constant init guard */
    static double R23, R46, T23, T46;       /* 2^-23, 2^-46, 2^23, 2^46 */
    double T1, T2, T3, T4;
    double A1, A2;                          /* high/low 23-bit halves of *A */
    double X1, X2;                          /* high/low 23-bit halves of *X */
    double Z;
    int    i, j;

    if (KS == 0)
    {
        /* Build the powers of two by repeated halving/doubling so they
           are exact on every platform (assumes 0.5 is exact). */
        R23 = 1.0;
        R46 = 1.0;
        T23 = 1.0;
        T46 = 1.0;
        for (i=1; i<=23; i++)
        {
            R23 = 0.50 * R23;
            T23 = 2.0 * T23;
        }
        for (i=1; i<=46; i++)
        {
            R46 = 0.50 * R46;
            T46 = 2.0 * T46;
        }
        KS = 1;
    }

    /* Break A into two parts such that A = 2^23 * A1 + A2. */
    T1 = R23 * *A;
    j  = T1;               /* truncate toward zero: j = floor(A / 2^23) */
    A1 = j;
    A2 = *A - T23 * A1;

    /* Break X into two parts such that X = 2^23 * X1 + X2, compute
       Z = A1 * X2 + A2 * X1  (mod 2^23), and then
       X = 2^23 * Z + A2 * X2  (mod 2^46). */
    T1 = R23 * *X;
    j  = T1;
    X1 = j;
    X2 = *X - T23 * X1;
    T1 = A1 * X2 + A2 * X1;
    j  = R23 * T1;
    T2 = j;
    Z  = T1 - T23 * T2;    /* Z = (A1*X2 + A2*X1) mod 2^23 */
    T3 = T23 * Z + A2 * X2;
    j  = R46 * T3;
    T4 = j;
    *X = T3 - T46 * T4;    /* new 46-bit seed */
    return (R46 * *X);     /* normalized into (0, 1) */
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/*
 * Fill key_array with NUM_KEYS pseudorandom keys in [0, MAX_KEY).
 * Each key is the sum of four consecutive randlc() draws (each in
 * (0,1), so the sum is in (0,4)) scaled by MAX_KEY/4, matching the
 * NPB IS key distribution.  seed/a are passed by value, so the
 * caller's values are not modified.
 *
 * (Removed the unused local `j` from the original.)
 */
void create_seq( double seed, double a )
{
    double x;
    int    i, k;

    k = MAX_KEY/4;

    for (i=0; i<NUM_KEYS; i++)
    {
        x = randlc(&seed, &a);
        x += randlc(&seed, &a);
        x += randlc(&seed, &a);
        x += randlc(&seed, &a);

        key_array[i] = k*x;    /* truncation toward zero yields an integer key */
    }
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/*
 * Final (untimed) verification: place every key into sorted order using
 * the rank information left in key_buff_ptr_global by the last call to
 * rank(), then scan the result and count out-of-order adjacent pairs.
 * Increments passed_verification on success; otherwise reports the
 * number of mis-sorted keys.
 *
 * NOTE: key_buff_ptr_global is decremented in place, so this routine
 * may only be run once per ranking pass.
 *
 * (Removed the unused locals `k`, `m`, `unique_keys` from the original.)
 */
void full_verify( void )
{
    INT_TYPE    i, j;

    /* Now, finally, sort the keys: counting-sort scatter using the
       precomputed ranks (each rank is pre-decremented to get the slot). */
    for( i=0; i<NUM_KEYS; i++ )
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];

    /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
    j = 0;
    for( i=1; i<NUM_KEYS; i++ )
        if( key_array[i-1] > key_array[i] )
            j++;

    if( j != 0 )
    {
        printf( "Full_verify: number of keys out of sort: %d\n",
                j );
    }
    else
        passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/*
 * One ranking step of the IS benchmark.  For every possible key value,
 * computes the number of keys <= that value (its rank) into the shared
 * array key_buff1.  Must be called from inside an OpenMP parallel
 * region: every thread histograms its share of the keys into a private
 * buffer (prv_buff1), prefix-sums it, and merges it into key_buff1
 * under a critical section.  The master thread additionally perturbs
 * two keys per iteration and runs the partial verification test.
 *
 * NOTE: prv_buff1 places MAX_KEY ints on each thread's stack — large
 * classes may require a raised thread stack size (e.g. OMP_STACKSIZE).
 *
 * (Removed the unused locals j, l, m, shift, key, min_key_val,
 * max_key_val from the original; they were bucket-version leftovers.)
 */
void rank( int iteration )
{
    INT_TYPE    i, k;
    INT_TYPE    prv_buff1[MAX_KEY];   /* thread-private key population */

#pragma omp master
  {
    /* Perturb two keys so each iteration ranks a slightly different set */
    key_array[iteration] = iteration;
    key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;

    /* Determine where the partial verify test keys are, load into  */
    /* top of array bucket_size                                     */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        partial_verify_vals[i] = key_array[test_index_array[i]];

    /* Clear the work array */
    for( i=0; i<MAX_KEY; i++ )
        key_buff1[i] = 0;
  }
#pragma omp barrier   /* all threads must see the cleared key_buff1 */

    /* Clear this thread's private population buffer */
    for (i=0; i<MAX_KEY; i++)
        prv_buff1[i] = 0;

    /* Copy keys into work array; keys in key_array will be reused each iter. */
#pragma omp for nowait
    for( i=0; i<NUM_KEYS; i++ ) {
        key_buff2[i] = key_array[i];

        /* In this section, the keys themselves are used as their
           own indexes to determine how many of each there are: their
           individual population */
        prv_buff1[key_buff2[i]]++;  /* Now they have individual key population */
    }

    /* Prefix sum turns the private population counts into partial ranks */
    for( i=0; i<MAX_KEY-1; i++ )
        prv_buff1[i+1] += prv_buff1[i];

    /* Merge this thread's partial ranks into the shared rank array */
#pragma omp critical
    {
        for( i=0; i<MAX_KEY; i++ )
            key_buff1[i] += prv_buff1[i];
    }

#pragma omp barrier   /* ranks must be complete before verification */
#pragma omp master
  {
    /* This is the partial verify test section */
    /* Observe that test_rank_array vals are   */
    /* shifted differently for different cases */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
    {
        k = partial_verify_vals[i];          /* test vals were put here */
        if( 0 <= k && k <= NUM_KEYS-1 )
            /* CLASS is a compile-time char constant, so the compiler
               folds this switch down to the single active case. */
            switch( CLASS )
            {
                case 'S':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'W':
                    if( i < 2 )
                    {
                        if( key_buff1[k-1] !=
                            test_rank_array[i]+(iteration-2) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'A':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] !=
                            test_rank_array[i]+(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] !=
                            test_rank_array[i]-(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'B':
                    if( i == 1 || i == 2 || i == 4 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'C':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
            }
    }

    /* Make copies of rank info for use by full_verify: these variables
       in rank are local; making them global slows down the code, probably
       since they cannot be made register by compiler                     */
    if( iteration == MAX_ITERATIONS )
        key_buff_ptr_global = key_buff1;
  } /* end master */
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/*
 * IS benchmark driver: select verification data for the compiled CLASS,
 * generate keys, do one untimed warm-up ranking, then time
 * MAX_ITERATIONS ranking passes, fully verify the final ordering, and
 * print the standard NPB results summary.
 *
 * Modernized from the K&R definition with implicit int return to a
 * standard ANSI main; removed the unused locals itemp and maxtime and
 * added an explicit return value.
 */
int main( int argc, char **argv )
{
    int             i, iteration;
    int             nthreads = 1;
    double          timecounter;

    /* Initialize the verification arrays if a valid class */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        switch( CLASS )
        {
            case 'S':
                test_index_array[i] = S_test_index_array[i];
                test_rank_array[i]  = S_test_rank_array[i];
                break;
            case 'A':
                test_index_array[i] = A_test_index_array[i];
                test_rank_array[i]  = A_test_rank_array[i];
                break;
            case 'W':
                test_index_array[i] = W_test_index_array[i];
                test_rank_array[i]  = W_test_rank_array[i];
                break;
            case 'B':
                test_index_array[i] = B_test_index_array[i];
                test_rank_array[i]  = B_test_rank_array[i];
                break;
            case 'C':
                test_index_array[i] = C_test_index_array[i];
                test_rank_array[i]  = C_test_rank_array[i];
                break;
        }

    /* Printout initial NPB info */
    printf( "\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
            " - IS Benchmark\n\n" );
    printf( " Size: %d (class %c)\n", TOTAL_KEYS, CLASS );
    printf( " Iterations: %d\n", MAX_ITERATIONS );

    /* Initialize timer */
    timer_clear( 0 );

    /* Generate random number sequence and subsequent keys on all procs */
    create_seq( 314159265.00,     /* Random number gen seed */
                1220703125.00 );  /* Random number gen mult */

    /* Do one iteration for free (i.e., untimed) to guarantee
       initialization of all data and code pages and respective tables */
#pragma omp parallel
    rank( 1 );

    /* Start verification counter */
    passed_verification = 0;

    if( CLASS != 'S' ) printf( "\n iteration\n" );

    /* Start timer */
    timer_start( 0 );

    /* This is the main iteration; `iteration` is private so every
       thread drives its own copy of the loop in lock-step with the
       barriers inside rank(). */
#pragma omp parallel private(iteration)
    for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
    {
#pragma omp master
        if( CLASS != 'S' ) printf( " %d\n", iteration );
        rank( iteration );
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    }

    /* End of timing, obtain maximum time of all processors */
    timer_stop( 0 );
    timecounter = timer_read( 0 );

    /* This tests that keys are in sequence: sorting of last ranked key seq
       occurs here, but is an untimed operation                            */
    full_verify();

    /* The final printout: 5 partial checks per iteration + 1 full check */
    if( passed_verification != 5*MAX_ITERATIONS + 1 )
        passed_verification = 0;
    c_print_results( "IS",
                     CLASS,
                     TOTAL_KEYS,
                     0,
                     0,
                     MAX_ITERATIONS,
                     nthreads,
                     timecounter,
                     ((double) (MAX_ITERATIONS*TOTAL_KEYS))
                                /timecounter/1000000.,
                     "keys ranked",
                     passed_verification,
                     NPBVERSION,
                     COMPILETIME,
                     CC,
                     CLINK,
                     C_LIB,
                     C_INC,
                     CFLAGS,
                     CLINKFLAGS,
                     "randlc");

    return 0;
}
|
GB_binop__ge_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp32)
// A*D function (colscale): GB (_AxD__ge_fp32)
// D*A function (rowscale): GB (_DxB__ge_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp32)
// C=scalar+B GB (_bind1st__ge_fp32)
// C=scalar+B' GB (_bind1st_tran__ge_fp32)
// C=A+scalar GB (_bind2nd__ge_fp32)
// C=A'+scalar GB (_bind2nd_tran__ge_fp32)
// C type: bool
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP32 || GxB_NO_GE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (here: Cx [p] = Ax [p] >= Bx [p]) where C, A, and B are all
// dense; no accumulator.  The loop itself lives in the included
// template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__ge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix.  The template
// is compiled out (#if 0) because GE is not a valid accumulator op, so
// this specialization is an intentional no-op stub that reports success.
GrB_Info GB (_Cdense_accumB__ge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix.  As with
// _Cdense_accumB above, the body is compiled out (#if 0) because GE is
// not a valid accumulator op; the stub just reports success.
GrB_Info GB (_Cdense_accumb__ge_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal matrix D: Cx [p] = Ax [p] >= d(j).
// Returns GrB_NO_VALUE when this specialization is disabled at compile
// time (GB_DISABLE), telling the caller to fall back to generic code.
GrB_Info GB (_AxD__ge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal matrix D: Cx [p] = d(i) >= Bx [p].
// Mirror image of _AxD above; same GB_DISABLE fallback convention.
GrB_Info GB (_DxB__ge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B with the GE_FP32 operator.
// When is_eWiseUnion is true, entries present in only one of A/B are
// paired with the alpha/beta scalars instead of being copied through.
GrB_Info GB (_AaddB__ge_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        // unpack the typed scalars only when eWiseUnion needs them
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the
// result C is sparse or hypersparse; work is pre-partitioned into
// TaskList by the caller.
GrB_Info GB (_AemultB_08__ge_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy indicates the caller swapped A and B, so a
// non-commutative op without a flipped twin must apply f(y,x); GE has
// GB_BINOP_FLIP == 0 (handled by the caller via LE), so only the
// unflipped template is instantiated here.
GrB_Info GB (_AemultB_02__ge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; iteration is driven by the mask M.
GrB_Info GB (_AemultB_04__ge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where the result C is held
// in bitmap form.
GrB_Info GB (_AemultB_bitmap__ge_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the GE_FP32 operator with the scalar bound to
// the first argument: Cx [k] = (x >= Bx [k]) for every entry present
// in B (per the bitmap Bb).  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__ge_fp32)
(
    GB_void *Cx_output,
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    bool *Cx = (bool *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, k))
        {
            float bkj = GBX (Bx, k, false) ;
            Cx [k] = (x >= bkj) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the GE_FP32 operator with the scalar bound to
// the second argument: Cx [k] = (Ax [k] >= y) for every entry present
// in A (per the bitmap Ab).  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__ge_fp32)
(
    GB_void *Cx_output,
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    bool *Cx = (bool *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, k))
        {
            float akj = GBX (Ax, k, false) ;
            Cx [k] = (akj >= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply the operator with the scalar
// bound first.  GB_CAST_OP is consumed by GB_unop_transpose.c below;
// cij = op (x, aij), no typecasting (in spite of the macro name).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply the operator with the scalar
// bound second.  GB_CAST_OP is consumed by GB_unop_transpose.c below;
// cij = op (aij, y), no typecasting (in spite of the macro name).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
conv3x3s2_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
const float* k0 = kernel + p*inch*9;
const float* k1 = kernel + (p+1)*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k03 = vld1q_f32(k0+3);
float32x4_t _k06 = vld1q_f32(k0+6);
float32x4_t _k10 = vld1q_f32(k1);
float32x4_t _k13 = vld1q_f32(k1+3);
float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n"// v6 = _sum0
"fmul v12.4s, v8.4s, %12.s[0] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n"// v7 = _sum1
"fmul v13.4s, v8.4s, %15.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld2 {v10.4s, v11.4s}, [%3] \n"// v10
"fmla v6.4s, v9.4s, %12.s[1] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v7.4s, v9.4s, %15.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4], #32 \n"// r1
"fmla v12.4s, v14.4s, %12.s[2] \n"
"fmla v13.4s, v14.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld2 {v10.4s, v11.4s}, [%4] \n"
"fmla v6.4s, v8.4s, %13.s[0] \n"
"fmla v7.4s, v8.4s, %16.s[0] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v12.4s, v9.4s, %13.s[1] \n"
"fmla v13.4s, v9.4s, %16.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v8.4s, v9.4s}, [%5], #32 \n"// r2
"fmla v6.4s, v14.4s, %13.s[2] \n"
"fmla v7.4s, v14.4s, %16.s[2] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld2 {v10.4s, v11.4s}, [%5] \n"
"fmla v12.4s, v8.4s, %14.s[0] \n"
"fmla v13.4s, v8.4s, %17.s[0] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v6.4s, v9.4s, %14.s[1] \n"
"fmla v7.4s, v9.4s, %17.s[1] \n"
"fmla v12.4s, v14.4s, %14.s[2] \n"
"fmla v13.4s, v14.4s, %17.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
"fadd v6.4s, v6.4s, v12.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"bne 0b \n"
"sub %3, %3, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%3, #256] \n"
"vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1] \n"// q6 = _sum0
"vmul.f32 q12, q8, %e12[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2] \n"// q7 = _sum1
"vmul.f32 q13, q8, %e15[0] \n"
"pld [%3, #128] \n"
"vld2.f32 {d20-d21}, [%3] \n"// q10
"vmla.f32 q6, q9, %e12[1] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q7, q9, %e15[1] \n"
"pld [%4, #256] \n"
"vld2.f32 {d16-d19}, [%4]! \n"// r1
"vmla.f32 q12, q11, %f12[0] \n"
"vmla.f32 q13, q11, %f15[0] \n"
"pld [%4, #128] \n"
"vld2.f32 {d20-d21}, [%4] \n"
"vmla.f32 q6, q8, %e13[0] \n"
"vmla.f32 q7, q8, %e16[0] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q12, q9, %e13[1] \n"
"vmla.f32 q13, q9, %e16[1] \n"
"pld [%5, #256] \n"
"vld2.f32 {d16-d19}, [%5]! \n"// r2
"vmla.f32 q6, q11, %f13[0] \n"
"vmla.f32 q7, q11, %f16[0] \n"
"pld [%5, #128] \n"
"vld2.f32 {d20-d21}, [%5] \n"
"vmla.f32 q12, q8, %e14[0] \n"
"vmla.f32 q13, q8, %e17[0] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q6, q9, %e14[1] \n"
"vmla.f32 q7, q9, %e17[1] \n"
"vmla.f32 q12, q11, %f14[0] \n"
"vmla.f32 q13, q11, %f17[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
"vadd.f32 q6, q6, q12 \n"
"vadd.f32 q7, q7, q13 \n"
"subs %0, #1 \n"
"vst1.f32 {d12-d13}, [%1]! \n"
"vst1.f32 {d14-d15}, [%2]! \n"
"bne 0b \n"
"sub %3, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
outptr1++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9;
k1 += 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* kernel0 = kernel + p*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k3456 = vld1q_f32(k1);
float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"fmla v0.4s, v2.4s, %10.s[0] \n"
"fmul v10.4s, v3.4s, %10.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmul v11.4s, v1.4s, %10.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v2.4s, v3.4s}, [%3], #32 \n"
"fmla v0.4s, v2.4s, %11.s[0] \n"
"fmla v10.4s, v3.4s, %11.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %11.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v2.4s, v3.4s}, [%4], #32 \n"
"fmla v0.4s, v2.4s, %12.s[0] \n"
"fmla v10.4s, v3.4s, %12.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %12.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"fadd v0.4s, v0.4s, v10.4s \n"
"fadd v0.4s, v0.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v0.4s}, [%1], #16 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmla.f32 q0, q2, %e10[0] \n"
"vmul.f32 q10, q3, %e10[1] \n"
"pld [%2, #128] \n"
"vld2.f32 {d16-d17}, [%2] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmul.f32 q11, q1, %f10[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d4-d7}, [%3]! \n"
"vmla.f32 q0, q2, %e11[0] \n"
"vmla.f32 q10, q3, %e11[1] \n"
"pld [%3, #128] \n"
"vld2.f32 {d16-d17}, [%3] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f11[0] \n"
"pld [%4, #256] \n"
"vld2.f32 {d4-d7}, [%4]! \n"
"vmla.f32 q0, q2, %e12[0] \n"
"vmla.f32 q10, q3, %e12[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d16-d17}, [%4] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f12[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vadd.f32 q0, q0, q10 \n"
"vadd.f32 q0, q0, q11 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
}
}
|
ex04.c | /* Copyright (c) 2019 CSC Training */
/* Copyright (c) 2021 ENCCS */
#include <stdio.h>
#include <math.h>
#define NX 102400
int main(void)
{
  double vecA[NX], vecB[NX], vecC[NX];
  double r = 0.2;

  /* Fill vecA with successive powers of r and vecB with ones. */
  for (int idx = 0; idx < NX; idx++) {
    vecA[idx] = pow(r, idx);
    vecB[idx] = 1.0;
  }

  /* Element-wise product, offloaded to the target device; the
     fixed-size arrays are mapped implicitly (tofrom). */
  #pragma omp target teams distribute
  for (int i = 0; i < NX; i++) {
    vecC[i] = vecA[i] * vecB[i];
  }

  /* Reduce the element-wise products on the host. */
  double sum = 0.0;
  for (int idx = 0; idx < NX; idx++) {
    sum += vecC[idx];
  }

  printf("The sum is: %8.6f \n", sum);
  return 0;
}
|
x_solve-brisbane.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB BT code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header-brisbane.h"
#include "work_lhs.h"
//#include "timers.h"
//---------------------------------------------------------------------
//
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//
//---------------------------------------------------------------------
void x_solve()
{
int i, j, k, m, n, isize, z;
// double pivot, coeff;
int gp22, gp12;
// double temp1, temp2, temp3;
double fjacX[5][5][PROBLEM_SIZE+1][JMAXP-1][KMAX-1];
double njacX[5][5][PROBLEM_SIZE+1][JMAXP-1][KMAX-1];
double lhsX[5][5][3][PROBLEM_SIZE][JMAXP-1][KMAX-1];
double pivot,coeff,temp1, temp2, temp3;
gp22 = grid_points[2]-2;
gp12 = grid_points[1]-2;
//---------------------------------------------------------------------
// This function computes the left hand side in the xi-direction
//---------------------------------------------------------------------
isize = grid_points[0]-1;
brisbane_mem mem_fjacX;
brisbane_mem mem_njacX;
brisbane_mem mem_lhsX;
brisbane_mem_create(sizeof(double) * 5 * 5 * (PROBLEM_SIZE + 1) * (JMAXP - 1) * (KMAX - 1), &mem_fjacX);
brisbane_mem_create(sizeof(double) * 5 * 5 * (PROBLEM_SIZE + 1) * (JMAXP - 1) * (KMAX - 1), &mem_njacX);
brisbane_mem_create(sizeof(double) * 5 * 5 * 3 * (PROBLEM_SIZE) * (JMAXP - 1) * (KMAX - 1), &mem_lhsX);
//---------------------------------------------------------------------
// determine a (labeled f) and n jacobians
//---------------------------------------------------------------------
#pragma omp target data map(alloc:fjacX[:][:][:][:][:],njacX[:][:][:][:][:],lhsX[:][:][:][:][:][:])
//present(rho_i,u,qs,rhs,square)
{
size_t kernel_x_solve_0_off[2] = { 1, 0 };
size_t kernel_x_solve_0_idx[2] = { gp12, isize + 1 };
brisbane_kernel kernel_x_solve_0;
brisbane_kernel_create("x_solve_0", &kernel_x_solve_0);
brisbane_kernel_setmem(kernel_x_solve_0, 0, mem_rho_i, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_0, 1, mem_fjacX, brisbane_w);
brisbane_kernel_setmem(kernel_x_solve_0, 2, mem_njacX, brisbane_w);
brisbane_kernel_setmem(kernel_x_solve_0, 3, mem_u, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_0, 4, mem_qs, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_0, 5, mem_square, brisbane_r);
brisbane_kernel_setarg(kernel_x_solve_0, 6, sizeof(double), &c1);
brisbane_kernel_setarg(kernel_x_solve_0, 7, sizeof(double), &c2);
brisbane_kernel_setarg(kernel_x_solve_0, 8, sizeof(double), &c3c4);
brisbane_kernel_setarg(kernel_x_solve_0, 9, sizeof(double), &c1345);
brisbane_kernel_setarg(kernel_x_solve_0, 10, sizeof(double), &con43);
brisbane_kernel_setarg(kernel_x_solve_0, 11, sizeof(int), &gp22);
brisbane_task task0;
brisbane_task_create(&task0);
brisbane_task_kernel(task0, kernel_x_solve_0, 2, kernel_x_solve_0_off, kernel_x_solve_0_idx);
brisbane_task_submit(task0, brisbane_cpu, NULL, true);
#if 0
#pragma omp target teams distribute parallel for collapse(2) private(temp1,temp2,temp3,i,j,k)
for (i = 0; i <= isize; i++) {
for (j = 1; j <= gp12; j++) {
for (k = 1; k <= gp22; k++) {
temp1 = rho_i[k][j][i];
temp2 = temp1 * temp1;
temp3 = temp1 * temp2;
//-------------------------------------------------------------------
//
//-------------------------------------------------------------------
fjacX[0][0][i][j][k] = 0.0;
fjacX[0][1][i][j][k] = 1.0;
fjacX[0][2][i][j][k] = 0.0;
fjacX[0][3][i][j][k] = 0.0;
fjacX[0][4][i][j][k] = 0.0;
fjacX[1][0][i][j][k] = -(u[k][j][i][1] * temp2 * u[k][j][i][1])
+ c2 * qs[k][j][i];
fjacX[1][1][i][j][k] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] );
fjacX[1][2][i][j][k] = - c2 * ( u[k][j][i][2] * temp1 );
fjacX[1][3][i][j][k] = - c2 * ( u[k][j][i][3] * temp1 );
fjacX[1][4][i][j][k] = c2;
fjacX[2][0][i][j][k] = - ( u[k][j][i][1]*u[k][j][i][2] ) * temp2;
fjacX[2][1][i][j][k] = u[k][j][i][2] * temp1;
fjacX[2][2][i][j][k] = u[k][j][i][1] * temp1;
fjacX[2][3][i][j][k] = 0.0;
fjacX[2][4][i][j][k] = 0.0;
fjacX[3][0][i][j][k] = - ( u[k][j][i][1]*u[k][j][i][3] ) * temp2;
fjacX[3][1][i][j][k] = u[k][j][i][3] * temp1;
fjacX[3][2][i][j][k] = 0.0;
fjacX[3][3][i][j][k] = u[k][j][i][1] * temp1;
fjacX[3][4][i][j][k] = 0.0;
fjacX[4][0][i][j][k] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* ( u[k][j][i][1] * temp2 );
fjacX[4][1][i][j][k] = c1 * u[k][j][i][4] * temp1
- c2 * ( u[k][j][i][1]*u[k][j][i][1] * temp2 + qs[k][j][i] );
fjacX[4][2][i][j][k] = - c2 * ( u[k][j][i][2]*u[k][j][i][1] ) * temp2;
fjacX[4][3][i][j][k] = - c2 * ( u[k][j][i][3]*u[k][j][i][1] ) * temp2;
fjacX[4][4][i][j][k] = c1 * ( u[k][j][i][1] * temp1 );
njacX[0][0][i][j][k] = 0.0;
njacX[0][1][i][j][k] = 0.0;
njacX[0][2][i][j][k] = 0.0;
njacX[0][3][i][j][k] = 0.0;
njacX[0][4][i][j][k] = 0.0;
njacX[1][0][i][j][k] = - con43 * c3c4 * temp2 * u[k][j][i][1];
njacX[1][1][i][j][k] = con43 * c3c4 * temp1;
njacX[1][2][i][j][k] = 0.0;
njacX[1][3][i][j][k] = 0.0;
njacX[1][4][i][j][k] = 0.0;
njacX[2][0][i][j][k] = - c3c4 * temp2 * u[k][j][i][2];
njacX[2][1][i][j][k] = 0.0;
njacX[2][2][i][j][k] = c3c4 * temp1;
njacX[2][3][i][j][k] = 0.0;
njacX[2][4][i][j][k] = 0.0;
njacX[3][0][i][j][k] = - c3c4 * temp2 * u[k][j][i][3];
njacX[3][1][i][j][k] = 0.0;
njacX[3][2][i][j][k] = 0.0;
njacX[3][3][i][j][k] = c3c4 * temp1;
njacX[3][4][i][j][k] = 0.0;
njacX[4][0][i][j][k] = - ( con43 * c3c4
- c1345 ) * temp3 * (u[k][j][i][1]*u[k][j][i][1])
- ( c3c4 - c1345 ) * temp3 * (u[k][j][i][2]*u[k][j][i][2])
- ( c3c4 - c1345 ) * temp3 * (u[k][j][i][3]*u[k][j][i][3])
- c1345 * temp2 * u[k][j][i][4];
njacX[4][1][i][j][k] = ( con43 * c3c4
- c1345 ) * temp2 * u[k][j][i][1];
njacX[4][2][i][j][k] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][2];
njacX[4][3][i][j][k] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][3];
njacX[4][4][i][j][k] = ( c1345 ) * temp1;
}
}
}
#endif
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in x direction
//---------------------------------------------------------------------
// lhsX[k][j]init(lhsX[k][j], isize);
// zero the whole left hand side for starters
size_t kernel_x_solve_1_off[3] = { 0, 1, 1 };
size_t kernel_x_solve_1_idx[3] = { 5, gp12, gp22 };
brisbane_kernel kernel_x_solve_1;
brisbane_kernel_create("x_solve_1", &kernel_x_solve_1);
brisbane_kernel_setmem(kernel_x_solve_1, 0, mem_lhsX, brisbane_w);
brisbane_kernel_setarg(kernel_x_solve_1, 1, sizeof(int), &isize);
brisbane_task task1;
brisbane_task_create(&task1);
brisbane_task_kernel(task1, kernel_x_solve_1, 3, kernel_x_solve_1_off, kernel_x_solve_1_idx);
brisbane_task_submit(task1, brisbane_cpu, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for collapse(3) private(k,j,n,m)
#else
#pragma omp target teams distribute parallel for simd collapse(4)
#endif
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
for (n = 0; n < 5; n++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
for (m = 0; m < 5; m++){
lhsX[m][n][0][0][j][k] = 0.0;
lhsX[m][n][1][0][j][k] = 0.0;
lhsX[m][n][2][0][j][k] = 0.0;
lhsX[m][n][0][isize][j][k] = 0.0;
lhsX[m][n][1][isize][j][k] = 0.0;
lhsX[m][n][2][isize][j][k] = 0.0;
}
}
}
}
#endif
// next, set all diagonal values to 1. This is overkill, but convenient
size_t kernel_x_solve_2_off[2] = { 1, 1 };
size_t kernel_x_solve_2_idx[2] = { gp12, gp22 };
brisbane_kernel kernel_x_solve_2;
brisbane_kernel_create("x_solve_2", &kernel_x_solve_2);
brisbane_kernel_setmem(kernel_x_solve_2, 0, mem_lhsX, brisbane_w);
brisbane_kernel_setarg(kernel_x_solve_2, 1, sizeof(int), &isize);
brisbane_task task2;
brisbane_task_create(&task2);
brisbane_task_kernel(task2, kernel_x_solve_2, 2, kernel_x_solve_2_off, kernel_x_solve_2_idx);
brisbane_task_submit(task2, brisbane_cpu, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(k,j) // collapse(2)
#else
#pragma omp target teams distribute parallel for simd collapse(2)
#endif
for (k = 1; k <= gp22; k++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
for (j = 1; j <= gp12; j++) {
lhsX[0][0][1][0][j][k] = 1.0;
lhsX[0][0][1][isize][j][k] = 1.0;
lhsX[1][1][1][0][j][k] = 1.0;
lhsX[1][1][1][isize][j][k] = 1.0;
lhsX[2][2][1][0][j][k] = 1.0;
lhsX[2][2][1][isize][j][k] = 1.0;
lhsX[3][3][1][0][j][k] = 1.0;
lhsX[3][3][1][isize][j][k] = 1.0;
lhsX[4][4][1][0][j][k] = 1.0;
lhsX[4][4][1][isize][j][k] = 1.0;
}
}
#endif
size_t kernel_x_solve_3_off[3] = { 1, 1, 1 };
size_t kernel_x_solve_3_idx[3] = { gp22, gp12, isize - 1 };
brisbane_kernel kernel_x_solve_3;
brisbane_kernel_create("x_solve_3", &kernel_x_solve_3);
brisbane_kernel_setmem(kernel_x_solve_3, 0, mem_lhsX, brisbane_w);
brisbane_kernel_setmem(kernel_x_solve_3, 1, mem_fjacX, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_3, 2, mem_njacX, brisbane_r);
brisbane_kernel_setarg(kernel_x_solve_3, 3, sizeof(double), &dt);
brisbane_kernel_setarg(kernel_x_solve_3, 4, sizeof(double), &tx1);
brisbane_kernel_setarg(kernel_x_solve_3, 5, sizeof(double), &tx2);
brisbane_kernel_setarg(kernel_x_solve_3, 6, sizeof(double), &dx1);
brisbane_kernel_setarg(kernel_x_solve_3, 7, sizeof(double), &dx2);
brisbane_kernel_setarg(kernel_x_solve_3, 8, sizeof(double), &dx3);
brisbane_kernel_setarg(kernel_x_solve_3, 9, sizeof(double), &dx4);
brisbane_kernel_setarg(kernel_x_solve_3, 10, sizeof(double), &dx5);
brisbane_task task3;
brisbane_task_create(&task3);
brisbane_task_kernel(task3, kernel_x_solve_3, 3, kernel_x_solve_3_off, kernel_x_solve_3_idx);
brisbane_task_submit(task3, brisbane_cpu, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for collapse(2) private(j,k)
#else
#pragma omp target teams distribute parallel for simd collapse(3) private(temp1,temp2)
#endif
for (i = 1; i <= isize-1; i++) {
for (j = 1; j <= gp12; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(temp1, temp2)
#endif
for (k = 1; k <= gp22; k++) {
temp1 = dt * tx1;
temp2 = dt * tx2;
lhsX[0][0][AA][i][j][k] = - temp2 * fjacX[0][0][i-1][j][k]
- temp1 * njacX[0][0][i-1][j][k]
- temp1 * dx1;
lhsX[0][1][AA][i][j][k] = - temp2 * fjacX[0][1][i-1][j][k]
- temp1 * njacX[0][1][i-1][j][k];
lhsX[0][2][AA][i][j][k] = - temp2 * fjacX[0][2][i-1][j][k]
- temp1 * njacX[0][2][i-1][j][k];
lhsX[0][3][AA][i][j][k] = - temp2 * fjacX[0][3][i-1][j][k]
- temp1 * njacX[0][3][i-1][j][k];
lhsX[0][4][AA][i][j][k] = - temp2 * fjacX[0][4][i-1][j][k]
- temp1 * njacX[0][4][i-1][j][k];
lhsX[1][0][AA][i][j][k] = - temp2 * fjacX[1][0][i-1][j][k]
- temp1 * njacX[1][0][i-1][j][k];
lhsX[1][1][AA][i][j][k] = - temp2 * fjacX[1][1][i-1][j][k]
- temp1 * njacX[1][1][i-1][j][k]
- temp1 * dx2;
lhsX[1][2][AA][i][j][k] = - temp2 * fjacX[1][2][i-1][j][k]
- temp1 * njacX[1][2][i-1][j][k];
lhsX[1][3][AA][i][j][k] = - temp2 * fjacX[1][3][i-1][j][k]
- temp1 * njacX[1][3][i-1][j][k];
lhsX[1][4][AA][i][j][k] = - temp2 * fjacX[1][4][i-1][j][k]
- temp1 * njacX[1][4][i-1][j][k];
lhsX[2][0][AA][i][j][k] = - temp2 * fjacX[2][0][i-1][j][k]
- temp1 * njacX[2][0][i-1][j][k];
lhsX[2][1][AA][i][j][k] = - temp2 * fjacX[2][1][i-1][j][k]
- temp1 * njacX[2][1][i-1][j][k];
lhsX[2][2][AA][i][j][k] = - temp2 * fjacX[2][2][i-1][j][k]
- temp1 * njacX[2][2][i-1][j][k]
- temp1 * dx3;
lhsX[2][3][AA][i][j][k] = - temp2 * fjacX[2][3][i-1][j][k]
- temp1 * njacX[2][3][i-1][j][k];
lhsX[2][4][AA][i][j][k] = - temp2 * fjacX[2][4][i-1][j][k]
- temp1 * njacX[2][4][i-1][j][k];
lhsX[3][0][AA][i][j][k] = - temp2 * fjacX[3][0][i-1][j][k]
- temp1 * njacX[3][0][i-1][j][k];
lhsX[3][1][AA][i][j][k] = - temp2 * fjacX[3][1][i-1][j][k]
- temp1 * njacX[3][1][i-1][j][k];
lhsX[3][2][AA][i][j][k] = - temp2 * fjacX[3][2][i-1][j][k]
- temp1 * njacX[3][2][i-1][j][k];
lhsX[3][3][AA][i][j][k] = - temp2 * fjacX[3][3][i-1][j][k]
- temp1 * njacX[3][3][i-1][j][k]
- temp1 * dx4;
lhsX[3][4][AA][i][j][k] = - temp2 * fjacX[3][4][i-1][j][k]
- temp1 * njacX[3][4][i-1][j][k];
lhsX[4][0][AA][i][j][k] = - temp2 * fjacX[4][0][i-1][j][k]
- temp1 * njacX[4][0][i-1][j][k];
lhsX[4][1][AA][i][j][k] = - temp2 * fjacX[4][1][i-1][j][k]
- temp1 * njacX[4][1][i-1][j][k];
lhsX[4][2][AA][i][j][k] = - temp2 * fjacX[4][2][i-1][j][k]
- temp1 * njacX[4][2][i-1][j][k];
lhsX[4][3][AA][i][j][k] = - temp2 * fjacX[4][3][i-1][j][k]
- temp1 * njacX[4][3][i-1][j][k];
lhsX[4][4][AA][i][j][k] = - temp2 * fjacX[4][4][i-1][j][k]
- temp1 * njacX[4][4][i-1][j][k]
- temp1 * dx5;
lhsX[0][0][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[0][0][i][j][k]
+ temp1 * 2.0 * dx1;
lhsX[0][1][BB][i][j][k] = temp1 * 2.0 * njacX[0][1][i][j][k];
lhsX[0][2][BB][i][j][k] = temp1 * 2.0 * njacX[0][2][i][j][k];
lhsX[0][3][BB][i][j][k] = temp1 * 2.0 * njacX[0][3][i][j][k];
lhsX[0][4][BB][i][j][k] = temp1 * 2.0 * njacX[0][4][i][j][k];
lhsX[1][0][BB][i][j][k] = temp1 * 2.0 * njacX[1][0][i][j][k];
lhsX[1][1][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[1][1][i][j][k]
+ temp1 * 2.0 * dx2;
lhsX[1][2][BB][i][j][k] = temp1 * 2.0 * njacX[1][2][i][j][k];
lhsX[1][3][BB][i][j][k] = temp1 * 2.0 * njacX[1][3][i][j][k];
lhsX[1][4][BB][i][j][k] = temp1 * 2.0 * njacX[1][4][i][j][k];
lhsX[2][0][BB][i][j][k] = temp1 * 2.0 * njacX[2][0][i][j][k];
lhsX[2][1][BB][i][j][k] = temp1 * 2.0 * njacX[2][1][i][j][k];
lhsX[2][2][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[2][2][i][j][k]
+ temp1 * 2.0 * dx3;
lhsX[2][3][BB][i][j][k] = temp1 * 2.0 * njacX[2][3][i][j][k];
lhsX[2][4][BB][i][j][k] = temp1 * 2.0 * njacX[2][4][i][j][k];
lhsX[3][0][BB][i][j][k] = temp1 * 2.0 * njacX[3][0][i][j][k];
lhsX[3][1][BB][i][j][k] = temp1 * 2.0 * njacX[3][1][i][j][k];
lhsX[3][2][BB][i][j][k] = temp1 * 2.0 * njacX[3][2][i][j][k];
lhsX[3][3][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[3][3][i][j][k]
+ temp1 * 2.0 * dx4;
lhsX[3][4][BB][i][j][k] = temp1 * 2.0 * njacX[3][4][i][j][k];
lhsX[4][0][BB][i][j][k] = temp1 * 2.0 * njacX[4][0][i][j][k];
lhsX[4][1][BB][i][j][k] = temp1 * 2.0 * njacX[4][1][i][j][k];
lhsX[4][2][BB][i][j][k] = temp1 * 2.0 * njacX[4][2][i][j][k];
lhsX[4][3][BB][i][j][k] = temp1 * 2.0 * njacX[4][3][i][j][k];
lhsX[4][4][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[4][4][i][j][k]
+ temp1 * 2.0 * dx5;
lhsX[0][0][CC][i][j][k] = temp2 * fjacX[0][0][i+1][j][k]
- temp1 * njacX[0][0][i+1][j][k]
- temp1 * dx1;
lhsX[0][1][CC][i][j][k] = temp2 * fjacX[0][1][i+1][j][k]
- temp1 * njacX[0][1][i+1][j][k];
lhsX[0][2][CC][i][j][k] = temp2 * fjacX[0][2][i+1][j][k]
- temp1 * njacX[0][2][i+1][j][k];
lhsX[0][3][CC][i][j][k] = temp2 * fjacX[0][3][i+1][j][k]
- temp1 * njacX[0][3][i+1][j][k];
lhsX[0][4][CC][i][j][k] = temp2 * fjacX[0][4][i+1][j][k]
- temp1 * njacX[0][4][i+1][j][k];
lhsX[1][0][CC][i][j][k] = temp2 * fjacX[1][0][i+1][j][k]
- temp1 * njacX[1][0][i+1][j][k];
lhsX[1][1][CC][i][j][k] = temp2 * fjacX[1][1][i+1][j][k]
- temp1 * njacX[1][1][i+1][j][k]
- temp1 * dx2;
lhsX[1][2][CC][i][j][k] = temp2 * fjacX[1][2][i+1][j][k]
- temp1 * njacX[1][2][i+1][j][k];
lhsX[1][3][CC][i][j][k] = temp2 * fjacX[1][3][i+1][j][k]
- temp1 * njacX[1][3][i+1][j][k];
lhsX[1][4][CC][i][j][k] = temp2 * fjacX[1][4][i+1][j][k]
- temp1 * njacX[1][4][i+1][j][k];
lhsX[2][0][CC][i][j][k] = temp2 * fjacX[2][0][i+1][j][k]
- temp1 * njacX[2][0][i+1][j][k];
lhsX[2][1][CC][i][j][k] = temp2 * fjacX[2][1][i+1][j][k]
- temp1 * njacX[2][1][i+1][j][k];
lhsX[2][2][CC][i][j][k] = temp2 * fjacX[2][2][i+1][j][k]
- temp1 * njacX[2][2][i+1][j][k]
- temp1 * dx3;
lhsX[2][3][CC][i][j][k] = temp2 * fjacX[2][3][i+1][j][k]
- temp1 * njacX[2][3][i+1][j][k];
lhsX[2][4][CC][i][j][k] = temp2 * fjacX[2][4][i+1][j][k]
- temp1 * njacX[2][4][i+1][j][k];
lhsX[3][0][CC][i][j][k] = temp2 * fjacX[3][0][i+1][j][k]
- temp1 * njacX[3][0][i+1][j][k];
lhsX[3][1][CC][i][j][k] = temp2 * fjacX[3][1][i+1][j][k]
- temp1 * njacX[3][1][i+1][j][k];
lhsX[3][2][CC][i][j][k] = temp2 * fjacX[3][2][i+1][j][k]
- temp1 * njacX[3][2][i+1][j][k];
lhsX[3][3][CC][i][j][k] = temp2 * fjacX[3][3][i+1][j][k]
- temp1 * njacX[3][3][i+1][j][k]
- temp1 * dx4;
lhsX[3][4][CC][i][j][k] = temp2 * fjacX[3][4][i+1][j][k]
- temp1 * njacX[3][4][i+1][j][k];
lhsX[4][0][CC][i][j][k] = temp2 * fjacX[4][0][i+1][j][k]
- temp1 * njacX[4][0][i+1][j][k];
lhsX[4][1][CC][i][j][k] = temp2 * fjacX[4][1][i+1][j][k]
- temp1 * njacX[4][1][i+1][j][k];
lhsX[4][2][CC][i][j][k] = temp2 * fjacX[4][2][i+1][j][k]
- temp1 * njacX[4][2][i+1][j][k];
lhsX[4][3][CC][i][j][k] = temp2 * fjacX[4][3][i+1][j][k]
- temp1 * njacX[4][3][i+1][j][k];
lhsX[4][4][CC][i][j][k] = temp2 * fjacX[4][4][i+1][j][k]
- temp1 * njacX[4][4][i+1][j][k]
- temp1 * dx5;
}
}
}
#endif
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs Gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(IMAX) and rhs'(IMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][j][0] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
//binvcrhs( lhsX[0][j][BB], lhsX[k][0][j][k][CC], rhs[k][j][0] );
size_t kernel_x_solve_4_off[2] = { 1, 1 };
size_t kernel_x_solve_4_idx[2] = { gp22, gp12 };
brisbane_kernel kernel_x_solve_4;
brisbane_kernel_create("x_solve_4", &kernel_x_solve_4);
brisbane_kernel_setmem(kernel_x_solve_4, 0, mem_lhsX, brisbane_rw);
brisbane_kernel_setmem(kernel_x_solve_4, 1, mem_rhs, brisbane_rw);
brisbane_task task4;
brisbane_task_create(&task4);
brisbane_task_kernel(task4, kernel_x_solve_4, 2, kernel_x_solve_4_off, kernel_x_solve_4_idx);
brisbane_task_submit(task4, brisbane_cpu, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(j,k,pivot, coeff)
#else
#pragma omp target teams distribute parallel for simd private(pivot, coeff) collapse(2)
#endif
for (j = 1; j <= gp12; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(pivot, coeff)
#endif
for (k = 1; k <= gp22; k++) {
/*
for(m = 0; m < 5; m++){
pivot = 1.00/lhsX[m][m][BB][0][j][k];
for(n = m+1; n < 5; n++){
lhsX[m][n][BB][0][j][k] = lhsX[m][n][BB][0][j][k]*pivot;
}
lhsX[m][0][CC][0][j][k] = lhsX[m][0][CC][0][j][k]*pivot;
lhsX[m][1][CC][0][j][k] = lhsX[m][1][CC][0][j][k]*pivot;
lhsX[m][2][CC][0][j][k] = lhsX[m][2][CC][0][j][k]*pivot;
lhsX[m][3][CC][0][j][k] = lhsX[m][3][CC][0][j][k]*pivot;
lhsX[m][4][CC][0][j][k] = lhsX[m][4][CC][0][j][k]*pivot;
rhs[k][j][0][m] = rhs[k][j][0][m]*pivot;
for(n = 0; n < 5; n++){
if(n != m){
coeff = lhsX[n][m][BB][0][j][k];
for(z = m+1; z < 5; z++){
lhsX[n][z][BB][0][j][k] = lhsX[n][z][BB][0][j][k] - coeff*lhsX[m][z][BB][0][j][k];
}
lhsX[n][0][CC][0][j][k] = lhsX[n][0][CC][0][j][k] - coeff*lhsX[m][0][CC][0][j][k];
lhsX[n][1][CC][0][j][k] = lhsX[n][1][CC][0][j][k] - coeff*lhsX[m][1][CC][0][j][k];
lhsX[n][2][CC][0][j][k] = lhsX[n][2][CC][0][j][k] - coeff*lhsX[m][2][CC][0][j][k];
lhsX[n][3][CC][0][j][k] = lhsX[n][3][CC][0][j][k] - coeff*lhsX[m][3][CC][0][j][k];
lhsX[n][4][CC][0][j][k] = lhsX[n][4][CC][0][j][k] - coeff*lhsX[m][4][CC][0][j][k];
rhs[k][j][0][n] = rhs[k][j][0][n] - coeff*rhs[k][j][0][m];
}
}
}
*/
pivot = 1.00/lhsX[0][0][BB][0][j][k];
lhsX[0][1][BB][0][j][k] = lhsX[0][1][BB][0][j][k]*pivot;
lhsX[0][2][BB][0][j][k] = lhsX[0][2][BB][0][j][k]*pivot;
lhsX[0][3][BB][0][j][k] = lhsX[0][3][BB][0][j][k]*pivot;
lhsX[0][4][BB][0][j][k] = lhsX[0][4][BB][0][j][k]*pivot;
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k]*pivot;
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k]*pivot;
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k]*pivot;
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k]*pivot;
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k]*pivot;
rhs[k][j][0][0] = rhs[k][j][0][0] *pivot;
coeff = lhsX[1][0][BB][0][j][k];
lhsX[1][1][BB][0][j][k]= lhsX[1][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k];
lhsX[1][2][BB][0][j][k]= lhsX[1][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k];
lhsX[1][3][BB][0][j][k]= lhsX[1][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k];
lhsX[1][4][BB][0][j][k]= lhsX[1][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k];
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k];
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k];
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k];
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k];
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k];
rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][0];
coeff = lhsX[2][0][BB][0][j][k];
lhsX[2][1][BB][0][j][k]= lhsX[2][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k];
lhsX[2][2][BB][0][j][k]= lhsX[2][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k];
lhsX[2][3][BB][0][j][k]= lhsX[2][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k];
lhsX[2][4][BB][0][j][k]= lhsX[2][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k];
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k];
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k];
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k];
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k];
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k];
rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][0];
coeff = lhsX[3][0][BB][0][j][k];
lhsX[3][1][BB][0][j][k]= lhsX[3][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k];
lhsX[3][2][BB][0][j][k]= lhsX[3][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k];
lhsX[3][3][BB][0][j][k]= lhsX[3][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k];
lhsX[3][4][BB][0][j][k]= lhsX[3][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k];
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k];
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k];
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k];
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k];
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k];
rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][0];
coeff = lhsX[4][0][BB][0][j][k];
lhsX[4][1][BB][0][j][k]= lhsX[4][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k];
lhsX[4][2][BB][0][j][k]= lhsX[4][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k];
lhsX[4][3][BB][0][j][k]= lhsX[4][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k];
lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k];
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k];
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k];
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k];
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k];
rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][0];
pivot = 1.00/lhsX[1][1][BB][0][j][k];
lhsX[1][2][BB][0][j][k] = lhsX[1][2][BB][0][j][k]*pivot;
lhsX[1][3][BB][0][j][k] = lhsX[1][3][BB][0][j][k]*pivot;
lhsX[1][4][BB][0][j][k] = lhsX[1][4][BB][0][j][k]*pivot;
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k]*pivot;
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k]*pivot;
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k]*pivot;
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k]*pivot;
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k]*pivot;
rhs[k][j][0][1] = rhs[k][j][0][1] *pivot;
coeff = lhsX[0][1][BB][0][j][k];
lhsX[0][2][BB][0][j][k]= lhsX[0][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k];
lhsX[0][3][BB][0][j][k]= lhsX[0][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k];
lhsX[0][4][BB][0][j][k]= lhsX[0][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k];
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k];
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k];
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k];
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k];
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k];
rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][1];
coeff = lhsX[2][1][BB][0][j][k];
lhsX[2][2][BB][0][j][k]= lhsX[2][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k];
lhsX[2][3][BB][0][j][k]= lhsX[2][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k];
lhsX[2][4][BB][0][j][k]= lhsX[2][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k];
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k];
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k];
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k];
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k];
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k];
rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][1];
coeff = lhsX[3][1][BB][0][j][k];
lhsX[3][2][BB][0][j][k]= lhsX[3][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k];
lhsX[3][3][BB][0][j][k]= lhsX[3][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k];
lhsX[3][4][BB][0][j][k]= lhsX[3][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k];
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k];
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k];
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k];
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k];
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k];
rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][1];
coeff = lhsX[4][1][BB][0][j][k];
lhsX[4][2][BB][0][j][k]= lhsX[4][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k];
lhsX[4][3][BB][0][j][k]= lhsX[4][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k];
lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k];
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k];
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k];
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k];
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k];
rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][1];
pivot = 1.00/lhsX[2][2][BB][0][j][k];
lhsX[2][3][BB][0][j][k] = lhsX[2][3][BB][0][j][k]*pivot;
lhsX[2][4][BB][0][j][k] = lhsX[2][4][BB][0][j][k]*pivot;
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k]*pivot;
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k]*pivot;
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k]*pivot;
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k]*pivot;
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k]*pivot;
rhs[k][j][0][2] = rhs[k][j][0][2] *pivot;
coeff = lhsX[0][2][BB][0][j][k];
lhsX[0][3][BB][0][j][k]= lhsX[0][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k];
lhsX[0][4][BB][0][j][k]= lhsX[0][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k];
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k];
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k];
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k];
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k];
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k];
rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][2];
coeff = lhsX[1][2][BB][0][j][k];
lhsX[1][3][BB][0][j][k]= lhsX[1][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k];
lhsX[1][4][BB][0][j][k]= lhsX[1][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k];
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k];
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k];
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k];
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k];
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k];
rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][2];
coeff = lhsX[3][2][BB][0][j][k];
lhsX[3][3][BB][0][j][k]= lhsX[3][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k];
lhsX[3][4][BB][0][j][k]= lhsX[3][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k];
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k];
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k];
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k];
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k];
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k];
rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][2];
coeff = lhsX[4][2][BB][0][j][k];
lhsX[4][3][BB][0][j][k]= lhsX[4][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k];
lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k];
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k];
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k];
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k];
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k];
rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][2];
pivot = 1.00/lhsX[3][3][BB][0][j][k];
lhsX[3][4][BB][0][j][k] = lhsX[3][4][BB][0][j][k]*pivot;
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k]*pivot;
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k]*pivot;
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k]*pivot;
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k]*pivot;
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k]*pivot;
rhs[k][j][0][3] = rhs[k][j][0][3] *pivot;
coeff = lhsX[0][3][BB][0][j][k];
lhsX[0][4][BB][0][j][k]= lhsX[0][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k];
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k];
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k];
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k];
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k];
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k];
rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][3];
coeff = lhsX[1][3][BB][0][j][k];
lhsX[1][4][BB][0][j][k]= lhsX[1][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k];
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k];
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k];
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k];
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k];
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k];
rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][3];
coeff = lhsX[2][3][BB][0][j][k];
lhsX[2][4][BB][0][j][k]= lhsX[2][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k];
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k];
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k];
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k];
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k];
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k];
rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][3];
coeff = lhsX[4][3][BB][0][j][k];
lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k];
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k];
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k];
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k];
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k];
rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][3];
pivot = 1.00/lhsX[4][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k]*pivot;
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k]*pivot;
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k]*pivot;
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k]*pivot;
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k]*pivot;
rhs[k][j][0][4] = rhs[k][j][0][4] *pivot;
coeff = lhsX[0][4][BB][0][j][k];
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k];
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k];
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k];
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k];
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k];
rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][4];
coeff = lhsX[1][4][BB][0][j][k];
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k];
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k];
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k];
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k];
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k];
rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][4];
coeff = lhsX[2][4][BB][0][j][k];
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k];
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k];
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k];
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k];
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k];
rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][4];
coeff = lhsX[3][4][BB][0][j][k];
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k];
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k];
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k];
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k];
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k];
rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][4];
}/*end j*/
}/*end k*/
#endif
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Launch device kernel "x_solve_5": the forward-elimination sweep over
// i = 1..isize-1 (matvec_sub, matmul_sub, binvcrhs per cell).  The
// sweep in i is a serial recurrence (cell i depends on cell i-1), so
// the kernel is parallelized over j only: offset {1}, extent {gp12}.
// The inner bounds isize and gp22 are passed as scalar arguments.
//---------------------------------------------------------------------
size_t kernel_x_solve_5_off[1] = { 1 };
size_t kernel_x_solve_5_idx[1] = { gp12 };
brisbane_kernel kernel_x_solve_5;
brisbane_kernel_create("x_solve_5", &kernel_x_solve_5);
// lhsX and rhs are both read and updated in place by the kernel.
brisbane_kernel_setmem(kernel_x_solve_5, 0, mem_lhsX, brisbane_rw);
brisbane_kernel_setmem(kernel_x_solve_5, 1, mem_rhs, brisbane_rw);
brisbane_kernel_setarg(kernel_x_solve_5, 2, sizeof(int), &isize);
brisbane_kernel_setarg(kernel_x_solve_5, 3, sizeof(int), &gp22);
brisbane_task task5;
brisbane_task_create(&task5);
brisbane_task_kernel(task5, kernel_x_solve_5, 1, kernel_x_solve_5_off, kernel_x_solve_5_idx);
// NOTE(review): final 'true' presumably makes the submit blocking —
// confirm against the brisbane API.
brisbane_task_submit(task5, brisbane_cpu, NULL, true);
#if 0
// Disabled reference implementation of the forward-elimination i sweep;
// this work is now performed by the "x_solve_5" kernel submitted above.
// Note the i loop carries a dependence (reads rhs[k][j][i-1] and
// lhsX[..][CC][i-1][..]), hence only j/k are parallelized.
#pragma omp target teams distribute parallel for private(i,k)
for (j = 1; j <= gp12; j++) {
for (i = 1; i <= isize-1; i++) {
#pragma omp simd private(pivot,coeff)
for (k = 1; k <= gp22; k++) {
//-------------------------------------------------------------------
// rhs(i) = rhs(i) - A*rhs(i-1)
//-------------------------------------------------------------------
//matvec_sub: lhsX[*][*][AA][i][j][k] * rhs[k][j][i-1] subtracted from rhs[k][j][i]
/*
for(m = 0; m < 5; m++){
rhs[k][j][i][m] = rhs[k][j][i][m] - lhsX[m][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[m][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[m][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[m][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[m][4][AA][i][j][k]*rhs[k][j][i-1][4];
}
*/
rhs[k][j][i][0] = rhs[k][j][i][0] - lhsX[0][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[0][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[0][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[0][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[0][4][AA][i][j][k]*rhs[k][j][i-1][4];
rhs[k][j][i][1] = rhs[k][j][i][1] - lhsX[1][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[1][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[1][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[1][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[1][4][AA][i][j][k]*rhs[k][j][i-1][4];
rhs[k][j][i][2] = rhs[k][j][i][2] - lhsX[2][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[2][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[2][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[2][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[2][4][AA][i][j][k]*rhs[k][j][i-1][4];
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsX[3][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[3][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[3][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[3][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[3][4][AA][i][j][k]*rhs[k][j][i-1][4];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsX[4][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[4][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[4][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[4][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[4][4][AA][i][j][k]*rhs[k][j][i-1][4];
//-------------------------------------------------------------------
// B(i) = B(i) - C(i-1)*A(i)
//-------------------------------------------------------------------
// matmul_sub: lhsX[*][*][AA][i][j][k] * lhsX[*][*][CC][i-1][j][k] subtracted from lhsX[*][*][BB][i][j][k]
/*
for(m = 0; m < 5; m++){
for(n = 0; n < 5; n++){
lhsX[n][m][BB][i][j][k] = lhsX[n][m][BB][i][j][k] - lhsX[n][0][AA][i][j][k]*lhsX[0][m][CC][i-1][j][k]
- lhsX[n][1][AA][i][j][k]*lhsX[1][m][CC][i-1][j][k]
- lhsX[n][2][AA][i][j][k]*lhsX[2][m][CC][i-1][j][k]
- lhsX[n][3][AA][i][j][k]*lhsX[3][m][CC][i-1][j][k]
- lhsX[n][4][AA][i][j][k]*lhsX[4][m][CC][i-1][j][k];
}
}
*/
lhsX[0][0][BB][i][j][k] = lhsX[0][0][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[1][0][BB][i][j][k] = lhsX[1][0][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[2][0][BB][i][j][k] = lhsX[2][0][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[3][0][BB][i][j][k] = lhsX[3][0][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[4][0][BB][i][j][k] = lhsX[4][0][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[0][1][BB][i][j][k] = lhsX[0][1][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[1][1][BB][i][j][k] = lhsX[1][1][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[2][1][BB][i][j][k] = lhsX[2][1][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[3][1][BB][i][j][k] = lhsX[3][1][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[4][1][BB][i][j][k] = lhsX[4][1][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[0][2][BB][i][j][k] = lhsX[0][2][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[1][2][BB][i][j][k] = lhsX[1][2][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[2][2][BB][i][j][k] = lhsX[2][2][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[3][2][BB][i][j][k] = lhsX[3][2][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[4][2][BB][i][j][k] = lhsX[4][2][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[0][3][BB][i][j][k] = lhsX[0][3][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[1][3][BB][i][j][k] = lhsX[1][3][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[2][3][BB][i][j][k] = lhsX[2][3][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[3][3][BB][i][j][k] = lhsX[3][3][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[4][3][BB][i][j][k] = lhsX[4][3][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[0][4][BB][i][j][k] = lhsX[0][4][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
lhsX[1][4][BB][i][j][k] = lhsX[1][4][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
lhsX[2][4][BB][i][j][k] = lhsX[2][4][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
lhsX[3][4][BB][i][j][k] = lhsX[3][4][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
lhsX[4][4][BB][i][j][k] = lhsX[4][4][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][j][i] by b_inverse[k][j][i] and copy to rhs
//-------------------------------------------------------------------
//binvcrhs: lhsX[*][*][BB][i][j][k], lhsX[*][*][CC][i][j][k], rhs[k][j][i]
/*
for(m = 0; m < 5; m++){
pivot = 1.00/lhsX[m][m][BB][i][j][k];
for(n = m+1; n < 5; n++){
lhsX[m][n][BB][i][j][k] = lhsX[m][n][BB][i][j][k]*pivot;
}
lhsX[m][0][CC][i][j][k] = lhsX[m][0][CC][i][j][k]*pivot;
lhsX[m][1][CC][i][j][k] = lhsX[m][1][CC][i][j][k]*pivot;
lhsX[m][2][CC][i][j][k] = lhsX[m][2][CC][i][j][k]*pivot;
lhsX[m][3][CC][i][j][k] = lhsX[m][3][CC][i][j][k]*pivot;
lhsX[m][4][CC][i][j][k] = lhsX[m][4][CC][i][j][k]*pivot;
rhs[k][j][i][m] = rhs[k][j][i][m]*pivot;
for(n = 0; n < 5; n++){
if(n != m){
coeff = lhsX[n][m][BB][i][j][k];
for(z = m+1; z < 5; z++){
lhsX[n][z][BB][i][j][k] = lhsX[n][z][BB][i][j][k] - coeff*lhsX[m][z][BB][i][j][k];
}
lhsX[n][0][CC][i][j][k] = lhsX[n][0][CC][i][j][k] - coeff*lhsX[m][0][CC][i][j][k];
lhsX[n][1][CC][i][j][k] = lhsX[n][1][CC][i][j][k] - coeff*lhsX[m][1][CC][i][j][k];
lhsX[n][2][CC][i][j][k] = lhsX[n][2][CC][i][j][k] - coeff*lhsX[m][2][CC][i][j][k];
lhsX[n][3][CC][i][j][k] = lhsX[n][3][CC][i][j][k] - coeff*lhsX[m][3][CC][i][j][k];
lhsX[n][4][CC][i][j][k] = lhsX[n][4][CC][i][j][k] - coeff*lhsX[m][4][CC][i][j][k];
rhs[k][j][i][n] = rhs[k][j][i][n] - coeff*rhs[k][j][i][m];
}
}
}
*/
pivot = 1.00/lhsX[0][0][BB][i][j][k];
lhsX[0][1][BB][i][j][k] = lhsX[0][1][BB][i][j][k]*pivot;
lhsX[0][2][BB][i][j][k] = lhsX[0][2][BB][i][j][k]*pivot;
lhsX[0][3][BB][i][j][k] = lhsX[0][3][BB][i][j][k]*pivot;
lhsX[0][4][BB][i][j][k] = lhsX[0][4][BB][i][j][k]*pivot;
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k]*pivot;
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k]*pivot;
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k]*pivot;
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k]*pivot;
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k]*pivot;
rhs[k][j][i][0] = rhs[k][j][i][0] *pivot;
coeff = lhsX[1][0][BB][i][j][k];
lhsX[1][1][BB][i][j][k]= lhsX[1][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k];
lhsX[1][2][BB][i][j][k]= lhsX[1][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k];
lhsX[1][3][BB][i][j][k]= lhsX[1][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k];
lhsX[1][4][BB][i][j][k]= lhsX[1][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k];
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k];
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k];
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k];
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k];
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][0];
coeff = lhsX[2][0][BB][i][j][k];
lhsX[2][1][BB][i][j][k]= lhsX[2][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k];
lhsX[2][2][BB][i][j][k]= lhsX[2][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k];
lhsX[2][3][BB][i][j][k]= lhsX[2][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k];
lhsX[2][4][BB][i][j][k]= lhsX[2][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k];
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k];
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k];
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k];
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k];
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][0];
coeff = lhsX[3][0][BB][i][j][k];
lhsX[3][1][BB][i][j][k]= lhsX[3][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k];
lhsX[3][2][BB][i][j][k]= lhsX[3][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k];
lhsX[3][3][BB][i][j][k]= lhsX[3][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k];
lhsX[3][4][BB][i][j][k]= lhsX[3][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k];
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k];
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k];
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k];
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k];
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][0];
coeff = lhsX[4][0][BB][i][j][k];
lhsX[4][1][BB][i][j][k]= lhsX[4][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k];
lhsX[4][2][BB][i][j][k]= lhsX[4][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k];
lhsX[4][3][BB][i][j][k]= lhsX[4][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k];
lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k];
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k];
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k];
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k];
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][0];
pivot = 1.00/lhsX[1][1][BB][i][j][k];
lhsX[1][2][BB][i][j][k] = lhsX[1][2][BB][i][j][k]*pivot;
lhsX[1][3][BB][i][j][k] = lhsX[1][3][BB][i][j][k]*pivot;
lhsX[1][4][BB][i][j][k] = lhsX[1][4][BB][i][j][k]*pivot;
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k]*pivot;
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k]*pivot;
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k]*pivot;
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k]*pivot;
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k]*pivot;
rhs[k][j][i][1] = rhs[k][j][i][1] *pivot;
coeff = lhsX[0][1][BB][i][j][k];
lhsX[0][2][BB][i][j][k]= lhsX[0][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k];
lhsX[0][3][BB][i][j][k]= lhsX[0][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k];
lhsX[0][4][BB][i][j][k]= lhsX[0][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k];
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k];
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k];
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k];
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k];
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][1];
coeff = lhsX[2][1][BB][i][j][k];
lhsX[2][2][BB][i][j][k]= lhsX[2][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k];
lhsX[2][3][BB][i][j][k]= lhsX[2][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k];
lhsX[2][4][BB][i][j][k]= lhsX[2][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k];
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k];
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k];
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k];
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k];
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][1];
coeff = lhsX[3][1][BB][i][j][k];
lhsX[3][2][BB][i][j][k]= lhsX[3][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k];
lhsX[3][3][BB][i][j][k]= lhsX[3][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k];
lhsX[3][4][BB][i][j][k]= lhsX[3][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k];
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k];
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k];
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k];
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k];
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][1];
coeff = lhsX[4][1][BB][i][j][k];
lhsX[4][2][BB][i][j][k]= lhsX[4][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k];
lhsX[4][3][BB][i][j][k]= lhsX[4][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k];
lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k];
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k];
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k];
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k];
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][1];
pivot = 1.00/lhsX[2][2][BB][i][j][k];
lhsX[2][3][BB][i][j][k] = lhsX[2][3][BB][i][j][k]*pivot;
lhsX[2][4][BB][i][j][k] = lhsX[2][4][BB][i][j][k]*pivot;
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k]*pivot;
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k]*pivot;
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k]*pivot;
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k]*pivot;
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k]*pivot;
rhs[k][j][i][2] = rhs[k][j][i][2] *pivot;
coeff = lhsX[0][2][BB][i][j][k];
lhsX[0][3][BB][i][j][k]= lhsX[0][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k];
lhsX[0][4][BB][i][j][k]= lhsX[0][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k];
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k];
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k];
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k];
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k];
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][2];
coeff = lhsX[1][2][BB][i][j][k];
lhsX[1][3][BB][i][j][k]= lhsX[1][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k];
lhsX[1][4][BB][i][j][k]= lhsX[1][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k];
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k];
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k];
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k];
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k];
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][2];
coeff = lhsX[3][2][BB][i][j][k];
lhsX[3][3][BB][i][j][k]= lhsX[3][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k];
lhsX[3][4][BB][i][j][k]= lhsX[3][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k];
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k];
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k];
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k];
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k];
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][2];
coeff = lhsX[4][2][BB][i][j][k];
lhsX[4][3][BB][i][j][k]= lhsX[4][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k];
lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k];
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k];
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k];
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k];
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][2];
pivot = 1.00/lhsX[3][3][BB][i][j][k];
lhsX[3][4][BB][i][j][k] = lhsX[3][4][BB][i][j][k]*pivot;
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k]*pivot;
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k]*pivot;
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k]*pivot;
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k]*pivot;
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k]*pivot;
rhs[k][j][i][3] = rhs[k][j][i][3] *pivot;
coeff = lhsX[0][3][BB][i][j][k];
lhsX[0][4][BB][i][j][k]= lhsX[0][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k];
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k];
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k];
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k];
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k];
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][3];
coeff = lhsX[1][3][BB][i][j][k];
lhsX[1][4][BB][i][j][k]= lhsX[1][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k];
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k];
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k];
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k];
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k];
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][3];
coeff = lhsX[2][3][BB][i][j][k];
lhsX[2][4][BB][i][j][k]= lhsX[2][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k];
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k];
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k];
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k];
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k];
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][3];
coeff = lhsX[4][3][BB][i][j][k];
lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k];
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k];
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k];
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k];
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][3];
pivot = 1.00/lhsX[4][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k]*pivot;
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k]*pivot;
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k]*pivot;
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k]*pivot;
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k]*pivot;
rhs[k][j][i][4] = rhs[k][j][i][4] *pivot;
coeff = lhsX[0][4][BB][i][j][k];
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k];
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k];
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k];
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k];
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][4];
coeff = lhsX[1][4][BB][i][j][k];
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k];
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k];
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k];
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k];
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][4];
coeff = lhsX[2][4][BB][i][j][k];
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k];
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k];
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k];
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k];
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][4];
coeff = lhsX[3][4][BB][i][j][k];
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k];
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k];
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k];
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k];
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][4];
}/*end i*/
}
}
#endif
//---------------------------------------------------------------------
// rhs(isize) = rhs(isize) - A*rhs(isize-1)
//---------------------------------------------------------------------
//matvec_sub(lhsX[isize-1][j][AA], rhs[k][isize][j][k], rhs[k][j][isize]);
// Offload the matvec_sub step at the last plane (i = isize) to the
// Brisbane/IRIS runtime.  The host reference loop this replaces is kept
// below under "#if 0" for documentation.  The kernel runs over a 2-D
// index space of gp12 x gp22 work-items starting at offset (1,1),
// mirroring the j = 1..gp12, k = 1..gp22 loop bounds of the original.
size_t kernel_x_solve_6_off[2] = { 1, 1 };
size_t kernel_x_solve_6_idx[2] = { gp12, gp22 };
brisbane_kernel kernel_x_solve_6;
brisbane_kernel_create("x_solve_6", &kernel_x_solve_6);
// Both lhsX and rhs are read and written by the kernel, hence brisbane_rw.
brisbane_kernel_setmem(kernel_x_solve_6, 0, mem_lhsX, brisbane_rw);
brisbane_kernel_setmem(kernel_x_solve_6, 1, mem_rhs, brisbane_rw);
// Scalar argument 2: the plane index isize the kernel operates on.
brisbane_kernel_setarg(kernel_x_solve_6, 2, sizeof(int), &isize);
brisbane_task task6;
brisbane_task_create(&task6);
brisbane_task_kernel(task6, kernel_x_solve_6, 2, kernel_x_solve_6_off, kernel_x_solve_6_idx);
// Submit to the CPU device; final argument true presumably requests a
// blocking (synchronous) submit -- confirm against the runtime API.
brisbane_task_submit(task6, brisbane_cpu, NULL, true);
#if 0
// Disabled host reference implementation of the kernel "x_solve_6"
// dispatched above: the matvec_sub at the last plane, i.e.
// rhs(isize) -= A(isize) * rhs(isize-1), fully unrolled over the 5x5
// block.  Kept for documentation/debugging only -- never compiled.
#pragma omp target teams distribute parallel for collapse(2) private(k,j)
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
/*
for(m = 0; m < 5; m++){
rhs[k][j][isize][m] = rhs[k][j][isize][m] - lhsX[m][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[m][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[m][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[m][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[m][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
}
*/
// Manual unrolling of the m-loop shown in the comment above (m = 0..4).
rhs[k][j][isize][0] = rhs[k][j][isize][0] - lhsX[0][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[0][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[0][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[0][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[0][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - lhsX[1][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[1][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[1][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[1][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[1][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - lhsX[2][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[2][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[2][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[2][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[2][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - lhsX[3][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[3][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[3][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[3][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[3][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - lhsX[4][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[4][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[4][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[4][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[4][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
}
}
#endif
//---------------------------------------------------------------------
// B(isize) = B(isize) - C(isize-1)*A(isize)
//---------------------------------------------------------------------
//matmul_sub(lhsX[isize-1][j][AA], lhsX[k][isize][j][k][CC], lhsX[k][j][isize][BB]);
// Offload the matmul_sub step at the last plane (i = isize).  Same 2-D
// gp12 x gp22 index space with offset (1,1) as kernel x_solve_6; the
// disabled reference loop follows under "#if 0".
size_t kernel_x_solve_7_off[2] = { 1, 1 };
size_t kernel_x_solve_7_idx[2] = { gp12, gp22 };
brisbane_kernel kernel_x_solve_7;
brisbane_kernel_create("x_solve_7", &kernel_x_solve_7);
// Only lhsX is touched by this kernel (B-block update from A and C blocks).
brisbane_kernel_setmem(kernel_x_solve_7, 0, mem_lhsX, brisbane_rw);
// Scalar argument 1: the plane index isize.
brisbane_kernel_setarg(kernel_x_solve_7, 1, sizeof(int), &isize);
brisbane_task task7;
brisbane_task_create(&task7);
brisbane_task_kernel(task7, kernel_x_solve_7, 2, kernel_x_solve_7_off, kernel_x_solve_7_idx);
// Blocking submit to the CPU device (same convention as task6).
brisbane_task_submit(task7, brisbane_cpu, NULL, true);
#if 0
// Disabled host reference implementation of the kernel "x_solve_7"
// dispatched above: the matmul_sub at the last plane, i.e.
// B(isize) -= A(isize) * C(isize-1) for every (j,k), fully unrolled
// over the 5x5 blocks.  Kept for documentation/debugging only.
#pragma omp target teams distribute parallel for collapse(2) private(k,j)
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
/*
for(m = 0; m < 5; m++){
for(n = 0; n < 5; n++){
lhsX[n][m][BB][isize][j][k] = lhsX[n][m][BB][isize][j][k] - lhsX[n][0][AA][isize][j][k]*lhsX[0][m][CC][isize-1][j][k]
- lhsX[n][1][AA][isize][j][k]*lhsX[1][m][CC][isize-1][j][k]
- lhsX[n][2][AA][isize][j][k]*lhsX[2][m][CC][isize-1][j][k]
- lhsX[n][3][AA][isize][j][k]*lhsX[3][m][CC][isize-1][j][k]
- lhsX[n][4][AA][isize][j][k]*lhsX[4][m][CC][isize-1][j][k];
}
}
*/
// Manual unrolling of the (m,n) double loop above: column m outer,
// row n inner, 25 block entries in total.
lhsX[0][0][BB][isize][j][k] = lhsX[0][0][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[1][0][BB][isize][j][k] = lhsX[1][0][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[2][0][BB][isize][j][k] = lhsX[2][0][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[3][0][BB][isize][j][k] = lhsX[3][0][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[4][0][BB][isize][j][k] = lhsX[4][0][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[0][1][BB][isize][j][k] = lhsX[0][1][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[1][1][BB][isize][j][k] = lhsX[1][1][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[2][1][BB][isize][j][k] = lhsX[2][1][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[3][1][BB][isize][j][k] = lhsX[3][1][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[4][1][BB][isize][j][k] = lhsX[4][1][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[0][2][BB][isize][j][k] = lhsX[0][2][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[1][2][BB][isize][j][k] = lhsX[1][2][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[2][2][BB][isize][j][k] = lhsX[2][2][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[3][2][BB][isize][j][k] = lhsX[3][2][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[4][2][BB][isize][j][k] = lhsX[4][2][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[0][3][BB][isize][j][k] = lhsX[0][3][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[1][3][BB][isize][j][k] = lhsX[1][3][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[2][3][BB][isize][j][k] = lhsX[2][3][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[3][3][BB][isize][j][k] = lhsX[3][3][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[4][3][BB][isize][j][k] = lhsX[4][3][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[0][4][BB][isize][j][k] = lhsX[0][4][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
lhsX[1][4][BB][isize][j][k] = lhsX[1][4][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
lhsX[2][4][BB][isize][j][k] = lhsX[2][4][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
lhsX[3][4][BB][isize][j][k] = lhsX[3][4][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
lhsX[4][4][BB][isize][j][k] = lhsX[4][4][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
}
}
#endif
//---------------------------------------------------------------------
// multiply rhs() by b_inverse() and copy to rhs
//---------------------------------------------------------------------
//binvrhs( lhsX[isize][j][BB], rhs[k][isize][j][k] );
// Offload the binvrhs step at the last plane: Gaussian elimination of
// the 5x5 B block at i = isize applied to rhs.  The disabled reference
// loop follows under "#if 0".
// NOTE(review): unlike kernels x_solve_6/x_solve_7 this task launches a
// 1-D index space of gp22 work-items (k only); presumably the
// "x_solve_8" kernel iterates j = 1..gp12 internally -- confirm against
// the kernel source.
size_t kernel_x_solve_8_off[1] = { 1 };
size_t kernel_x_solve_8_idx[1] = { gp22 };
brisbane_kernel kernel_x_solve_8;
brisbane_kernel_create("x_solve_8", &kernel_x_solve_8);
// Both lhsX (pivoted in place) and rhs (solved in place) are read-write.
brisbane_kernel_setmem(kernel_x_solve_8, 0, mem_lhsX, brisbane_rw);
brisbane_kernel_setmem(kernel_x_solve_8, 1, mem_rhs, brisbane_rw);
// Scalar argument 2: the plane index isize.
brisbane_kernel_setarg(kernel_x_solve_8, 2, sizeof(int), &isize);
brisbane_task task8;
brisbane_task_create(&task8);
brisbane_task_kernel(task8, kernel_x_solve_8, 1, kernel_x_solve_8_off, kernel_x_solve_8_idx);
// Blocking submit to the CPU device (same convention as task6/task7).
brisbane_task_submit(task8, brisbane_cpu, NULL, true);
#if 0
#pragma omp target teams distribute parallel for private(j,k,pivot,coeff)
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
/*
for(m = 0; m < 5; m++){
pivot = 1.00/lhsX[m][m][BB][isize][j][k];
for(n = m+1; n < 5; n++){
lhsX[m][n][BB][isize][j][k] = lhsX[m][n][BB][isize][j][k]*pivot;
}
rhs[k][j][isize][m] = rhs[k][j][isize][m]*pivot;
for(n = 0; n < 5; n++){
if(n != m){
coeff = lhsX[n][m][BB][isize][j][k];
for(z = m+1; z < 5; z++){
lhsX[n][z][BB][isize][j][k] = lhsX[n][z][BB][isize][j][k] - coeff*lhsX[m][z][BB][isize][j][k];
}
rhs[k][j][isize][n] = rhs[k][j][isize][n] - coeff*rhs[k][j][isize][m];
}
}
}
*/
pivot = 1.00/lhsX[0][0][BB][isize][j][k];
lhsX[0][1][BB][isize][j][k] = lhsX[0][1][BB][isize][j][k]*pivot;
lhsX[0][2][BB][isize][j][k] = lhsX[0][2][BB][isize][j][k]*pivot;
lhsX[0][3][BB][isize][j][k] = lhsX[0][3][BB][isize][j][k]*pivot;
lhsX[0][4][BB][isize][j][k] = lhsX[0][4][BB][isize][j][k]*pivot;
rhs[k][j][isize][0] = rhs[k][j][isize][0] *pivot;
coeff = lhsX[1][0][BB][isize][j][k];
lhsX[1][1][BB][isize][j][k]= lhsX[1][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k];
lhsX[1][2][BB][isize][j][k]= lhsX[1][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k];
lhsX[1][3][BB][isize][j][k]= lhsX[1][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k];
lhsX[1][4][BB][isize][j][k]= lhsX[1][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][0];
coeff = lhsX[2][0][BB][isize][j][k];
lhsX[2][1][BB][isize][j][k]= lhsX[2][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k];
lhsX[2][2][BB][isize][j][k]= lhsX[2][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k];
lhsX[2][3][BB][isize][j][k]= lhsX[2][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k];
lhsX[2][4][BB][isize][j][k]= lhsX[2][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][0];
coeff = lhsX[3][0][BB][isize][j][k];
lhsX[3][1][BB][isize][j][k]= lhsX[3][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k];
lhsX[3][2][BB][isize][j][k]= lhsX[3][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k];
lhsX[3][3][BB][isize][j][k]= lhsX[3][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k];
lhsX[3][4][BB][isize][j][k]= lhsX[3][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][0];
coeff = lhsX[4][0][BB][isize][j][k];
lhsX[4][1][BB][isize][j][k]= lhsX[4][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k];
lhsX[4][2][BB][isize][j][k]= lhsX[4][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k];
lhsX[4][3][BB][isize][j][k]= lhsX[4][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k];
lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][0];
pivot = 1.00/lhsX[1][1][BB][isize][j][k];
lhsX[1][2][BB][isize][j][k] = lhsX[1][2][BB][isize][j][k]*pivot;
lhsX[1][3][BB][isize][j][k] = lhsX[1][3][BB][isize][j][k]*pivot;
lhsX[1][4][BB][isize][j][k] = lhsX[1][4][BB][isize][j][k]*pivot;
rhs[k][j][isize][1] = rhs[k][j][isize][1] *pivot;
coeff = lhsX[0][1][BB][isize][j][k];
lhsX[0][2][BB][isize][j][k]= lhsX[0][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k];
lhsX[0][3][BB][isize][j][k]= lhsX[0][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k];
lhsX[0][4][BB][isize][j][k]= lhsX[0][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][1];
coeff = lhsX[2][1][BB][isize][j][k];
lhsX[2][2][BB][isize][j][k]= lhsX[2][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k];
lhsX[2][3][BB][isize][j][k]= lhsX[2][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k];
lhsX[2][4][BB][isize][j][k]= lhsX[2][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][1];
coeff = lhsX[3][1][BB][isize][j][k];
lhsX[3][2][BB][isize][j][k]= lhsX[3][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k];
lhsX[3][3][BB][isize][j][k]= lhsX[3][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k];
lhsX[3][4][BB][isize][j][k]= lhsX[3][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][1];
coeff = lhsX[4][1][BB][isize][j][k];
lhsX[4][2][BB][isize][j][k]= lhsX[4][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k];
lhsX[4][3][BB][isize][j][k]= lhsX[4][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k];
lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][1];
pivot = 1.00/lhsX[2][2][BB][isize][j][k];
lhsX[2][3][BB][isize][j][k] = lhsX[2][3][BB][isize][j][k]*pivot;
lhsX[2][4][BB][isize][j][k] = lhsX[2][4][BB][isize][j][k]*pivot;
rhs[k][j][isize][2] = rhs[k][j][isize][2] *pivot;
coeff = lhsX[0][2][BB][isize][j][k];
lhsX[0][3][BB][isize][j][k]= lhsX[0][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k];
lhsX[0][4][BB][isize][j][k]= lhsX[0][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][2];
coeff = lhsX[1][2][BB][isize][j][k];
lhsX[1][3][BB][isize][j][k]= lhsX[1][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k];
lhsX[1][4][BB][isize][j][k]= lhsX[1][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][2];
coeff = lhsX[3][2][BB][isize][j][k];
lhsX[3][3][BB][isize][j][k]= lhsX[3][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k];
lhsX[3][4][BB][isize][j][k]= lhsX[3][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][2];
coeff = lhsX[4][2][BB][isize][j][k];
lhsX[4][3][BB][isize][j][k]= lhsX[4][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k];
lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][2];
pivot = 1.00/lhsX[3][3][BB][isize][j][k];
lhsX[3][4][BB][isize][j][k] = lhsX[3][4][BB][isize][j][k]*pivot;
rhs[k][j][isize][3] = rhs[k][j][isize][3] *pivot;
coeff = lhsX[0][3][BB][isize][j][k];
lhsX[0][4][BB][isize][j][k]= lhsX[0][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][3];
coeff = lhsX[1][3][BB][isize][j][k];
lhsX[1][4][BB][isize][j][k]= lhsX[1][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][3];
coeff = lhsX[2][3][BB][isize][j][k];
lhsX[2][4][BB][isize][j][k]= lhsX[2][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][3];
coeff = lhsX[4][3][BB][isize][j][k];
lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][3];
pivot = 1.00/lhsX[4][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] *pivot;
coeff = lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][4];
coeff = lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][4];
coeff = lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][4];
coeff = lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][4];
}
}
#endif
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(isize)=rhs(isize)
// else assume U(isize) is loaded in un pack backsub_info
// so just use it
// after u(istart) will be sent to next cell
//---------------------------------------------------------------------
size_t kernel_x_solve_9_off[2] = { 1, 1 };
size_t kernel_x_solve_9_idx[2] = { gp12, gp22 };
brisbane_kernel kernel_x_solve_9;
brisbane_kernel_create("x_solve_9", &kernel_x_solve_9);
brisbane_kernel_setmem(kernel_x_solve_9, 0, mem_lhsX, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_9, 1, mem_rhs, brisbane_rw);
brisbane_kernel_setarg(kernel_x_solve_9, 2, sizeof(int), &isize);
brisbane_task task9;
brisbane_task_create(&task9);
brisbane_task_kernel(task9, kernel_x_solve_9, 2, kernel_x_solve_9_off, kernel_x_solve_9_idx);
//brisbane_task_submit(task9, brisbane_cpu, NULL, true);
#if 1
brisbane_task task10;
brisbane_task_create(&task10);
brisbane_task_d2h_full(task10, mem_rhs, rhs);
brisbane_task_d2h_full(task10, mem_lhsX, lhsX);
brisbane_task_submit(task10, brisbane_cpu, NULL, true);
#pragma omp target teams distribute parallel for collapse(2) private(i,j,k,m,n)
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
for (i = isize-1; i >=0; i--) {
for (m = 0; m < BLOCK_SIZE; m++) {
for (n = 0; n < BLOCK_SIZE; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhsX[m][n][CC][i][j][k]*rhs[k][j][i+1][n];
}
}
}
}
}
brisbane_task task11;
brisbane_task_create(&task11);
brisbane_task_h2d_full(task11, mem_rhs, rhs);
brisbane_task_submit(task11, brisbane_cpu, NULL, true);
#endif
}/*end omp target data */
brisbane_mem_release(mem_fjacX);
brisbane_mem_release(mem_njacX);
brisbane_mem_release(mem_lhsX);
}
|
knnImpl.h | // This code is part of the project "ParGeo: A Library for Parallel Computational Geometry"
// Copyright (c) 2021-2022 Yiqiu Wang, Shangdi Yu, Laxman Dhulipala, Yan Gu, Julian Shun
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#pragma once
#include <limits>
#include <algorithm>
#include "parlay/parallel.h"
#include "parlay/sequence.h"
#include "kdTree.h"
#include "pargeo/point.h"
// #include <omp.h>
namespace pargeo::kdTreeNUMA
{
namespace knnBuf
{
typedef int intT;
typedef double floatT;

// A candidate neighbor: a point handle `entry` tagged with its distance
// (`cost`) to the query. A default-constructed elem acts as a +infinity
// sentinel, so it orders after every real candidate.
template <typename T>
struct elem
{
  floatT cost; // Non-negative distance to the query
  T entry;

  elem(floatT t_cost, T t_entry) : cost(t_cost), entry(t_entry) {}
  elem() : cost(std::numeric_limits<floatT>::max()) {}

  // Strict weak ordering by cost alone; equal costs compare not-less.
  bool operator<(const elem &b) const { return cost < b.cost; }
};
// Fixed-capacity scratch buffer for collecting k-nearest-neighbor
// candidates. Wraps a caller-provided slice (typically of size 2k):
// elements are appended until the slice is full, then keepK() compacts the
// best k to the front and appending continues from position k.
template <typename T>
struct buffer
{
typedef parlay::slice<elem<T> *, elem<T> *> sliceT;
intT k;          // number of neighbors requested
intT ptr;        // index one past the last valid candidate
sliceT buf;      // caller-owned backing storage
// Upper bound on the k-th smallest cost seen so far. Exact immediately
// after keepK(); insert() only ever grows it between compactions.
double max_cost = 0;
buffer(intT t_k, sliceT t_buf) : k(t_k), ptr(0), buf(t_buf) {}
inline void reset() { ptr = 0; }
// True once at least k candidates have been inserted.
bool hasK() { return ptr >= k; }
// Compact the k smallest candidates into buf[0..k) (buf[k-1] becomes the
// k-th smallest; the first k-1 are unordered), discard the rest, and
// recompute max_cost exactly. Returns the k-th smallest element.
// Throws if fewer than k candidates have been inserted.
elem<T> keepK()
{
if (ptr < k)
throw std::runtime_error("Error, kbuffer not enough k.");
ptr = k;
std::nth_element(buf.begin(), buf.begin() + k - 1, buf.end());
max_cost = 0;
for(auto b = buf.begin(); b < buf.begin() + k ; ++b){
max_cost = std::max(max_cost, b->cost);
}
return buf[k - 1];
}
// Sort the first k candidates ascending by cost. Requires hasK().
void sort()
{ // todo check
if (ptr < k)
throw std::runtime_error("Error, sorting kbuffer without enough k.");
parlay::sort_inplace(buf.cut(0, k));
}
// Append a candidate; when the slice is full, keepK() compacts it so the
// append position falls back to k.
void insert(elem<T> t_elem)
{
buf[ptr++] = t_elem;
max_cost = std::max(max_cost, t_elem.cost);
if (ptr >= buf.size())
keepK();
}
// Element access; out-of-range reads yield an infinite-cost sentinel.
elem<T> operator[](intT i)
{
if (i < ptr)
return buf[i];
else
return elem<T>();
}
inline size_t size() {return ptr;}
// Pruning radius for the search: +inf until k candidates exist; afterwards
// max_cost, which may overestimate the true k-th distance between keepK()
// calls (insert() never lowers it).
inline double back() {
if(ptr<k){return std::numeric_limits<double>::max();}
return max_cost;
}
};
}
// Ball-range search used to top up an already-k-full buffer: recursively
// visits every subtree whose bounding box intersects the ball of `radius`
// around q. The radius is re-tightened from out.back() as the buffer
// improves, so later subtrees are pruned more aggressively than in
// knnRangeHelper (which keeps the radius fixed).
template <int dim, typename nodeT, typename objT>
void knnRangeHelper2(nodeT *tree, objT &q,
double radius, knnBuf::buffer<objT *> &out)
{
// Classify this node's bounding box against the query ball.
int relation = tree->boxBallCompare(q, radius, tree->getMin(), tree->getMax());
if (relation == tree->boxExclude)
{
// Box entirely outside the ball: prune this subtree.
return;
}
// else if (relation == tree->boxInclude)
// {
// for (size_t i = 0; i < tree->size(); ++i)
// {
// objT *p = tree->getItem(i);
// out.insert(knnBuf::elem(q.dist(*p), p));
// }
// }
else
{ // intersect
if (tree->isLeaf())
{
// Leaf: test each stored point against the current radius.
for (size_t i = 0; i < tree->size(); ++i)
{
objT *p = tree->getItem(i);
double dist = q.dist(*p);
if (dist <= radius)
{
out.insert(knnBuf::elem(dist, p));
// Shrink the search ball as better candidates arrive.
radius = out.back();
}
}
}
else
{
knnRangeHelper2<dim, nodeT, objT>(tree->L(), q, radius, out);
// The left subtree may have tightened the bound; reuse it on the right.
radius = out.back();
knnRangeHelper2<dim, nodeT, objT>(tree->R(), q, radius, out);
}
}
}
// k-nearest-neighbor query of q against `tree`, accumulating candidates in
// `out`. First collects points from every leaf whose box contains q; if
// fewer than k candidates were found, the sibling subtree's points are
// added wholesale; otherwise the sibling is searched with a ball of radius
// out.back() to refine the result. Throws if the root is reached with
// fewer than k candidates.
template <int dim, typename nodeT, typename objT>
void knnHelper2(nodeT *tree, objT &q, knnBuf::buffer<objT *> &out)
{
// find the leaf first: compare this node's box against the degenerate
// box [q, q].
int relation = tree->boxCompare(tree->getMin(), tree->getMax(),
point<dim>(q.coords()),
point<dim>(q.coords()));
if (relation == tree->boxExclude)
{
return;
}
else
{
if (tree->isLeaf())
{
// basecase: insert every point stored in this leaf
for (size_t i = 0; i < tree->size(); ++i)
{
objT *p = tree->getItem(i);
out.insert(knnBuf::elem(q.dist(*p), p));
}
}
else
{
knnHelper2<dim, nodeT, objT>(tree->L(), q, out);
knnHelper2<dim, nodeT, objT>(tree->R(), q, out);
}
}
if (!out.hasK())
{
// Not enough candidates yet: fall back to the sibling subtree.
if (tree->siblin() == NULL)
{
throw std::runtime_error("Error, knnHelper reached root node without enough neighbors.");
}
for (size_t i = 0; i < tree->siblin()->size(); ++i)
{
objT *p = tree->siblin()->getItem(i);
out.insert(knnBuf::elem(q.dist(*p), p));
}
}
else
{ // Buffer filled to at least k: refine with a radius-bounded search.
// NOTE(review): this uses out.back() — an upper bound on the k-th
// distance — where knnHelper uses the exact keepK().cost; confirm the
// looser bound is intentional.
if (tree->siblin() != NULL)
{
knnRangeHelper2<dim, nodeT, objT>(tree->siblin(), q, out.back(), out);
}
}
}
// Batched kNN: for each query point, find its k nearest neighbors in
// `tree` and write their indices into `idx`; results for query i occupy
// idx[i*k .. i*k+k). When shuffle_search is set, results for query i are
// written at row ordermap[i] instead of i; when shuffle_tree is set,
// neighbor ids are remapped through treemap. If `tree` is null, a tree is
// built from the queries themselves and released before returning.
template <int dim, class objT>
void batchKnn2(parlay::slice<objT *, objT *> queries,
size_t k,
tree<dim, objT> *tree,
parlay::slice<size_t *, size_t *> idx,
parlay::slice<int *, int *> ordermap, //only used when shuffle search or shuffle tree is true
parlay::slice<int *, int *> treemap, //only used when shuffle search or shuffle tree is true
bool shuffle_search = false,
bool shuffle_tree = false,
bool sorted=false)
{
using nodeT = node<dim, objT>;
bool freeTree = false;
if (!tree)
{
freeTree = true;
tree = build<dim, objT>(queries, true);
}
// Per-query scratch: 2k elems so the buffer can defer compaction.
auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
// auto idx = parlay::sequence<size_t>(k * queries.size());
parlay::parallel_for(0, queries.size(), [&](size_t i)
{
knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
knnHelper2<dim, nodeT, objT>(tree, queries[i], buf);
buf.keepK();
if (sorted){buf.sort();}
size_t idx_i = shuffle_search ? ordermap[i] : i;
// size_t idx_i = queries[i].attribute;
for (size_t j = 0; j < k; ++j){
// Neighbor id = offset of the found point in the tree's item array.
size_t result_id = buf[j].entry - tree->items_begin;
result_id = shuffle_tree ? treemap[result_id] : result_id;
idx[idx_i * k + j] = result_id;
}
});
if (freeTree)
// NOTE(review): assumes build() allocates with malloc-compatible
// storage and that free() is the matching release — confirm.
free(tree);
// return idx;
}
// Recursive box-range search: inserts into `out` every point of `tree`
// within `radius` of q, where [qMin, qMax] is the precomputed bounding box
// of the query ball. The radius stays fixed for the whole traversal
// (contrast knnRangeHelper2, which shrinks it as the buffer improves).
template <int dim, typename nodeT, typename objT>
void knnRangeHelper(nodeT *tree, objT &q, point<dim> qMin, point<dim> qMax,
double radius, knnBuf::buffer<objT *> &out)
{
int relation = tree->boxCompare(qMin, qMax, tree->getMin(), tree->getMax());
if (relation == tree->boxExclude)
{
// Disjoint boxes: prune this subtree.
return;
}
else if (relation == tree->boxInclude)
{
// Node box fully inside the query box: take every point without a
// distance test. NOTE(review): box-inclusion does not imply
// ball-inclusion, so corner points may exceed `radius`; confirm that
// downstream buffer pruning makes this acceptable.
for (size_t i = 0; i < tree->size(); ++i)
{
objT *p = tree->getItem(i);
out.insert(knnBuf::elem(q.dist(*p), p));
}
}
else
{ // intersect
if (tree->isLeaf())
{
// Leaf: keep only points actually inside the ball.
for (size_t i = 0; i < tree->size(); ++i)
{
objT *p = tree->getItem(i);
double dist = q.dist(*p);
if (dist <= radius)
{
out.insert(knnBuf::elem(dist, p));
}
}
}
else
{
knnRangeHelper<dim, nodeT, objT>(tree->L(), q, qMin, qMax, radius, out);
knnRangeHelper<dim, nodeT, objT>(tree->R(), q, qMin, qMax, radius, out);
}
}
}
template <int dim, typename nodeT, typename objT>
void knnRange(nodeT *tree, objT &q, double radius, knnBuf::buffer<objT *> &out)
{
point<dim> qMin, qMax;
for (size_t i = 0; i < dim; i++)
{
auto tmp = q[i] - radius;
qMin[i] = tmp;
qMax[i] = tmp + radius * 2;
}
knnRangeHelper<dim, nodeT, objT>(tree, q, qMin, qMax, radius, out);
}
// k-nearest-neighbor query of q against `tree` (variant used by batchKnn).
// Descends to the leaves containing q inserting their points, then either
// tops the buffer up from the sibling subtree (if fewer than k candidates)
// or refines with a range search whose radius is the exact k-th smallest
// distance from keepK(). Throws if the root is reached with fewer than k.
template <int dim, typename nodeT, typename objT>
void knnHelper(nodeT *tree, objT &q, knnBuf::buffer<objT *> &out)
{
// find the leaf first: compare this node's box against the degenerate
// box [q, q].
int relation = tree->boxCompare(tree->getMin(), tree->getMax(),
point<dim>(q.coords()),
point<dim>(q.coords()));
if (relation == tree->boxExclude)
{
return;
}
else
{
if (tree->isLeaf())
{
// basecase: insert every point stored in this leaf
for (size_t i = 0; i < tree->size(); ++i)
{
objT *p = tree->getItem(i);
out.insert(knnBuf::elem(q.dist(*p), p));
}
}
else
{
knnHelper<dim, nodeT, objT>(tree->L(), q, out);
knnHelper<dim, nodeT, objT>(tree->R(), q, out);
}
}
if (!out.hasK())
{
// Not enough candidates yet: fall back to the sibling subtree.
if (tree->siblin() == NULL)
{
throw std::runtime_error("Error, knnHelper reached root node without enough neighbors.");
}
for (size_t i = 0; i < tree->siblin()->size(); ++i)
{
objT *p = tree->siblin()->getItem(i);
out.insert(knnBuf::elem(q.dist(*p), p));
}
}
else
{ // Buffer filled to at least k: refine the sibling subtree with the
// exact k-th smallest distance as the search radius.
if (tree->siblin() != NULL)
{
knnBuf::elem tmp = out.keepK();
knnRange<dim, nodeT, objT>(tree->siblin(), q, tmp.cost, out);
}
}
}
// the tree->items have to be the same as queries
// Batched kNN over `queries`: returns a flat sequence where entry i*k+j is
// the index (into `queries`) of the j-th neighbor of query i. The index is
// computed as buf[j].entry - queries.begin(), which is only valid because
// the tree's items are required to be the queries themselves. If `tree` is
// null, one is built from the queries and released before returning.
template <int dim, class objT>
parlay::sequence<size_t> batchKnn(parlay::slice<objT *, objT *> queries,
size_t k,
node<dim, objT> *tree,
bool sorted)
{
using nodeT = node<dim, objT>;
bool freeTree = false;
if (!tree)
{
freeTree = true;
tree = build<dim, objT>(queries, true);
}
// Per-query scratch: 2k elems so the buffer can defer compaction.
auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
auto idx = parlay::sequence<size_t>(k * queries.size());
parlay::parallel_for(0, queries.size(), [&](size_t i)
{
knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
knnHelper<dim, nodeT, objT>(tree, queries[i], buf);
buf.keepK();
if (sorted){
buf.sort();}
for (size_t j = 0; j < k; ++j)
{
idx[i * k + j] = buf[j].entry - queries.begin();
}
});
if (freeTree)
// NOTE(review): assumes build() allocates with malloc-compatible
// storage and that free() is the matching release — confirm.
free(tree);
return idx;
}
template <int dim, typename nodeT, typename objT>
void traverseTree(nodeT *tree, objT &query, knnBuf::buffer<objT *> &out, int k)
{
if (tree->isLeaf())
{
// basecase
for (size_t i = 0; i < tree->size(); ++i)
{
objT *p = tree->getItem(i);
out.insert(knnBuf::elem(query.dist(*p), p));
}
return;
}
traverseTree<dim, nodeT, objT>(tree->left, query, out, k);
traverseTree<dim, nodeT, objT>(tree->right, query, out, k);
}
// Brute-force batched kNN baseline: each query is compared against every
// point by exhaustively traversing the whole tree (no pruning). Returns a
// flat sequence where entry i*k+j is the index (into `queries`) of the
// j-th neighbor of query i. If `tree` is null, one is built from the
// queries and released before returning.
template <int dim, class objT>
parlay::sequence<size_t> batchTraverse(parlay::slice<objT *, objT *> queries,
size_t k,
node<dim, objT> *tree = nullptr,
bool sorted = false)
{
using nodeT = node<dim, objT>;
bool freeTree = false;
if (!tree)
{
freeTree = true;
tree = build<dim, objT>(queries, true);
}
// Per-query scratch: 2k elems so the buffer can defer compaction.
auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
auto idx = parlay::sequence<size_t>(k * queries.size());
parlay::parallel_for(0, queries.size(), [&](size_t i)
{
knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
traverseTree<dim, nodeT, objT>(tree, queries[i], buf, k);
buf.keepK();
if (sorted)
buf.sort();
for (size_t j = 0; j < k; ++j)
{
idx[i * k + j] = buf[j].entry - queries.begin();
}
});
if (freeTree)
free(tree);
return idx;
}
// template <int dim, typename nodeT, typename objT>
// void knnHelperSimple(nodeT *tree, objT &query, knnBuf::buffer<objT *> &out, int k)
// {
// if (tree->isLeaf())
// {
// // basecase
// for (size_t i = 0; i < tree->size(); ++i)
// {
// objT *p = tree->getItem(i);
// out.insert(knnBuf::elem(query.dist(*p), p));
// }
// return;
// }
// const double split = tree->getSplit();
// const int axis = tree->k;
// nodeT* next[2] = {tree->left, tree->right}; //next[0] = tree->left; next[1] = tree->right;
// const int dir = query[axis] < split ? 0 : 1;
// knnHelperSimple<dim, nodeT, objT>(next[dir], query, out, k);
// // nnSearchRecursive(query, node->next[dir], guess, minDist);
// const double diff = fabs(query[axis] - split);
// if ((int)out.size() < k || diff < out.back()) knnHelperSimple<dim, nodeT, objT>(next[!dir], query, out, k);
// // knnSearchRecursive(query, node->next[!dir], queue, k);
// }
// template <int dim, class objT>
// parlay::sequence<size_t> batchKnnSimple(parlay::slice<objT *, objT *> queries,
// size_t k,
// node<dim, objT> *tree = nullptr,
// bool sorted = false)
// {
// using nodeT = node<dim, objT>;
// bool freeTree = false;
// if (!tree)
// {
// freeTree = true;
// tree = build<dim, objT>(queries, true);
// }
// auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
// auto idx = parlay::sequence<size_t>(k * queries.size());
// parlay::parallel_for(0, queries.size(), [&](size_t i)
// {
// knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
// knnHelperSimple<dim, nodeT, objT>(tree, queries[i], buf, k);
// // std::cout <<"done" << std::endl;
// buf.keepK();
// if (sorted)
// buf.sort();
// for (size_t j = 0; j < k; ++j)
// {
// idx[i * k + j] = buf[j].entry - queries.begin();
// }
// // if(i==0){
// // for (size_t j = 0; j < k; ++j)
// // {
// // std::cout << buf[j].entry - queries.begin() << " " << buf[j].cost << std::endl;
// // }
// // }
// });
// if (freeTree)
// free(tree);
// return idx;
// }
// template <int dim, class objT>
// parlay::sequence<size_t> batchKnnOmp(parlay::slice<objT *, objT *> queries,
// size_t k,
// node<dim, objT> *tree,
// bool sorted)
// {
// using nodeT = node<dim, objT>;
// bool freeTree = false;
// if (!tree)
// {
// freeTree = true;
// tree = build<dim, objT>(queries, true);
// }
// auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
// auto idx = parlay::sequence<size_t>(k * queries.size());
// #pragma omp parallel for // num_threads( numThreads/2 ) proc_bind(master)
// for ( size_t i = 0; i < queries.size(); i++ ){
// // int place_num = omp_get_place_num();
// knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
// knnHelper<dim, nodeT, objT>(tree, queries[i], buf);
// buf.keepK();
// if (sorted)
// buf.sort();
// for (size_t j = 0; j < k; ++j)
// {
// idx[i * k + j] = buf[j].entry - queries.begin();
// }
// }
// if (freeTree)
// free(tree);
// return idx;
// }
// O(n^2) brute-force kNN over `queries` (no tree): every query is compared
// against every point directly. Returns a flat sequence where entry i*k+j
// is the index (into `queries`) of the j-th neighbor of query i.
//
// Fixes vs. original: `elem` must be qualified as `knnBuf::elem` (it lives
// in the nested namespace, as every other call site spells it), and
// `dist` takes the point by reference — the original passed the pointer
// `p` instead of `*p`, unlike all other call sites in this header.
template <int dim, typename objT>
parlay::sequence<size_t> bruteforceKnn(parlay::sequence<objT> &queries, size_t k)
{
  // Per-query scratch: 2k elems so the buffer can defer compaction.
  auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
  auto idx = parlay::sequence<size_t>(k * queries.size());
  parlay::parallel_for(0, queries.size(), [&](size_t i)
  {
    objT q = queries[i];
    knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
    for (size_t j = 0; j < queries.size(); ++j)
    {
      objT *p = &queries[j];
      buf.insert(knnBuf::elem(q.dist(*p), p)); // was: elem(q.dist(p), p)
    }
    buf.keepK();
    for (size_t j = 0; j < k; ++j)
    {
      idx[i * k + j] = buf[j].entry - queries.data();
    }
  });
  return idx;
}
} // End namespace pargeo
|
fox_floats_timer_caching_omp.c | /* fox_floats_timer_caching_omp.c -- uses Fox's algorithm to multiply two square matrices
*
* Input:
* n: global order of matrices
* A,B: the factor matrices
* Output:
* C: the product matrix
*
* Notes:
* 1. Assumes the number of processes is a perfect square
* 2. The array member of the matrices is statically allocated
*
* See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI
*/
/* Compiler command:
* mpiicc -O3 -qopenmp -xMIC-AVX512 -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp.c -o fox_floats_timer_caching_omp
*
* Run command:
 * mpirun -n 4 ./fox_floats_timer_caching_omp
*/
/* Head files */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
// define float precision, 4 byte single-precision float or 8 byte double-precision float
#define FLOAT double
#define FLOAT_MPI MPI_DOUBLE
// Define thread count used to speed up the computation
// 8 for Intel Core i7-6700K
// 134 for Intel Xeon Phi 7250 (68-1)*2 Hardware Threads
#define NUM_THREADS 2
// Define threads affinity "scatter" or "compact"
#define AFFINITY "KMP_AFFINITY = compact"
/* Type define structure of process grid */
typedef struct {
int p; /* Total number of processes */
MPI_Comm comm; /* Communicator for entire grid */
MPI_Comm row_comm; /* Communicator for my row */
MPI_Comm col_comm; /* Communicator for my col */
int q; /* Order of grid */
int my_row; /* My row number */
int my_col; /* My column number */
int my_rank; /* My rank in the grid comm */
} GRID_INFO_T;
/* Type define structure of local matrix */
#define MAX 65536 // Maximum number of elements in the array that store the local matrix (2^16)
typedef struct {
int n_bar;
#define Order(A) ((A)->n_bar) // defination with parameters
FLOAT entries[MAX];
#define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference
} LOCAL_MATRIX_T;
/* Function Declarations */
LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar);
void Free_local_matrix(LOCAL_MATRIX_T** local_A);
void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B,
GRID_INFO_T* grid, int n); // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
void Print_matrix(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n);
void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid, int n);
void Set_to_zero(LOCAL_MATRIX_T* local_A);
void Local_matrix_multiply(LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
void Build_matrix_type(LOCAL_MATRIX_T* local_A);
MPI_Datatype local_matrix_mpi_t;
LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer
void Print_local_matrices(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid);
void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid);
/*********************************************************/
/* Program entry point: Fox's-algorithm driver.
 * Reads the global order n and the two factor matrices on process 0,
 * distributes blocks across a q x q process grid, runs Fox's algorithm,
 * and prints the product together with wall-clock timing.
 *
 * Fixes vs. original: explicit `int` return type (implicit int is invalid
 * since C99), `return 0` at the end, removed the unused local `p`, and
 * temp_mat is now freed before MPI_Finalize (it was leaked).
 */
int main(int argc, char* argv[]) {
    int              my_rank;
    GRID_INFO_T      grid;
    LOCAL_MATRIX_T*  local_A;
    LOCAL_MATRIX_T*  local_B;
    LOCAL_MATRIX_T*  local_C;
    int              n;               /* global matrix order */
    int              n_bar;           /* local block order, n / q */
    double           timer_start;
    double           timer_end;
    void Setup_grid(GRID_INFO_T* grid);
    void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A,
             LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);

    /* SPMD mode starts here (processes fork from here) */
    MPI_Init(&argc, &argv);                            /* MPI initializing */
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);           /* Get my process id in the MPI communicator */
    /* Initialize the OpenMP environment */
    omp_set_num_threads(NUM_THREADS);
    kmp_set_defaults(AFFINITY);
    Setup_grid(&grid);                                 /* Set up process grid */
    if (my_rank == 0) {
        printf("What's the order of the matrices?\n");
        scanf("%d", &n);                               /* Overall matrix order */
    }
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);      /* Broadcast the overall matrix order */
    n_bar = n/grid.q;                                  /* \bar n is the local block order */
    local_A = Local_matrix_allocate(n_bar);            /* Allocate local matrix A */
    Order(local_A) = n_bar;
    Read_matrix_A("Enter A", local_A, &grid, n);       /* Read/distribute A from process 0 via stdin */
    Print_matrix("We read A =", local_A, &grid, n);
    local_B = Local_matrix_allocate(n_bar);            /* Allocate local matrix B */
    Order(local_B) = n_bar;
    Read_matrix_B("Enter B", local_B, &grid, n);       /* B is stored as its local transpose for contiguous access */
    Print_matrix_B("We read B =", local_B, &grid, n);
    Build_matrix_type(local_A);                        /* Build local_A's MPI matrix datatype */
    temp_mat = Local_matrix_allocate(n_bar);           /* Scratch block used by Print_* helpers */
    local_C = Local_matrix_allocate(n_bar);            /* Allocate result matrix local_C */
    Order(local_C) = n_bar;
    MPI_Barrier(MPI_COMM_WORLD);                       /* Synchronize before timing */
    timer_start = MPI_Wtime();
    Fox(n, &grid, local_A, local_B, local_C);          /* Fox parallel matrix multiplication */
    timer_end = MPI_Wtime();
    MPI_Barrier(MPI_COMM_WORLD);
    Print_matrix("The product is", local_C, &grid, n);
    Print_local_matrices("Split of local matrix A",
        local_A, &grid);
    Print_local_matrices_B("Split of local matrix B",
        local_B, &grid);                               /* special print for row-major B^{T} storage */
    Print_local_matrices("Split of local matrix C",
        local_C, &grid);
    Free_local_matrix(&local_A);
    Free_local_matrix(&local_B);
    Free_local_matrix(&local_C);
    Free_local_matrix(&temp_mat);                      /* fix: temp_mat was never freed */
    if(my_rank == 0)
        printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start);
    MPI_Finalize();                                    /* processes join; resources recycled */
    return 0;
} /* main */
/*********************************************************/
/* Build the q x q Cartesian process grid and its row/column communicators.
 * Fills every field of *grid: total process count p, grid order q = sqrt(p),
 * this process's grid coordinates and rank, and the three communicators.
 */
void Setup_grid(
         GRID_INFO_T*  grid  /* out */) {
    int old_rank;
    int dimensions[2];
    int wrap_around[2];
    int coordinates[2];
    int free_coords[2];

    /* Set up Global Grid Information */
    MPI_Comm_size(MPI_COMM_WORLD, &(grid->p));
    MPI_Comm_rank(MPI_COMM_WORLD, &old_rank);

    /* We assume p is a perfect square */ // but what if it's not a perfect square
    /* NOTE(review): if p is not a perfect square, q*q < p and the extra
     * processes fall outside the grid — confirm callers guarantee this. */
    grid->q = (int) sqrt((double) grid->p);
    dimensions[0] = dimensions[1] = grid->q;

    /* We want a circular shift in second dimension. */
    /* Don't care about first */
    wrap_around[0] = wrap_around[1] = 1;
    /* reorder = 1: MPI may renumber ranks, so the grid rank is re-queried
     * below instead of reusing old_rank. */
    MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions,
        wrap_around, 1, &(grid->comm));
    MPI_Comm_rank(grid->comm, &(grid->my_rank));
    MPI_Cart_coords(grid->comm, grid->my_rank, 2,
        coordinates);
    grid->my_row = coordinates[0];
    grid->my_col = coordinates[1];

    /* Set up row communicators */
    free_coords[0] = 0;
    free_coords[1] = 1;
    MPI_Cart_sub(grid->comm, free_coords,
        &(grid->row_comm));

    /* Set up column communicators */
    free_coords[0] = 1;
    free_coords[1] = 0;
    MPI_Cart_sub(grid->comm, free_coords,
        &(grid->col_comm));
} /* Setup_grid */
/*********************************************************/
/* Fox's algorithm: C += A * B over the q x q process grid.
 * Each of the q stages broadcasts one block of A along each grid row,
 * multiplies it into the local C, then circularly shifts the B blocks up
 * the grid columns.
 *
 * Fix vs. original: temp_A (allocated with Local_matrix_allocate, i.e.
 * malloc) was never released, leaking one LOCAL_MATRIX_T per call.
 */
void Fox(
        int              n         /* in  */,
        GRID_INFO_T*     grid      /* in  */,
        LOCAL_MATRIX_T*  local_A   /* in  */,
        LOCAL_MATRIX_T*  local_B   /* in  */,
        LOCAL_MATRIX_T*  local_C   /* out */) {
    LOCAL_MATRIX_T*  temp_A;  /* Storage for the sub-    */
                              /* matrix of A used during */
                              /* the current stage       */
    int              stage;
    int              bcast_root;
    int              n_bar;   /* n/sqrt(p) */
    int              source;
    int              dest;
    MPI_Status       status;

    n_bar = n/grid->q;
    Set_to_zero(local_C);

    /* Calculate addresses for row circular shift of B:
     * receive from the row below, send to the row above (wraps around). */
    source = (grid->my_row + 1) % grid->q;
    dest = (grid->my_row + grid->q - 1) % grid->q;

    /* Set aside storage for the broadcast block of A */
    temp_A = Local_matrix_allocate(n_bar);

    for (stage = 0; stage < grid->q; stage++) {
        bcast_root = (grid->my_row + stage) % grid->q;
        if (bcast_root == grid->my_col) {
            /* This process owns the stage's A block: broadcast our own. */
            MPI_Bcast(local_A, 1, local_matrix_mpi_t,
                bcast_root, grid->row_comm);
            Local_matrix_multiply(local_A, local_B,
                local_C);
        } else {
            /* Receive the stage's A block into scratch storage. */
            MPI_Bcast(temp_A, 1, local_matrix_mpi_t,
                bcast_root, grid->row_comm);
            Local_matrix_multiply(temp_A, local_B,
                local_C);
        }
        /* Shift B blocks one step up the grid column. */
        MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t,
            dest, 0, source, 0, grid->col_comm, &status);
    } /* for */

    Free_local_matrix(&temp_A);  /* fix: was leaked in the original */
} /* Fox */
/*********************************************************/
/* Allocate one LOCAL_MATRIX_T on the heap and return it.
 * The `local_order` parameter is unused: the entries array has the fixed
 * compile-time capacity MAX, and callers set Order(...) themselves.
 * NOTE(review): the malloc result is returned unchecked — a NULL return
 * is not handled here or at any call site; confirm acceptable. */
LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) {
    LOCAL_MATRIX_T* temp;

    temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T));
    return temp;
}  /* Local_matrix_allocate */
/*********************************************************/
/* Release a block allocated by Local_matrix_allocate and null the
 * caller's pointer so it cannot dangle or be double-freed. */
void Free_local_matrix(
        LOCAL_MATRIX_T** local_A_ptr  /* in/out */) {
    free(*local_A_ptr);
    *local_A_ptr = NULL;  /* guard against use-after-free */
} /* Free_local_matrix */
/*********************************************************/
/* Read and distribute matrix for matrix A:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
/* Process 0 reads global matrix A row by row from stdin and ships each
 * n_bar-long row segment to the grid process that owns it; every other
 * process receives its Order(local_A) row segments in row order.
 * NOTE(review): the "%lf" format assumes FLOAT is double -- confirm it
 * matches the FLOAT typedef and FLOAT_MPI.  scanf return values are
 * unchecked, so malformed input is silently ignored. */
void Read_matrix_A(
        char*            prompt   /* in  */,
        LOCAL_MATRIX_T*  local_A  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {
    int        mat_row, mat_col;
    int        grid_row, grid_col;
    int        dest;
    int        coords[2];
    FLOAT*     temp;    /* staging buffer: one n_bar-long row segment */
    MPI_Status status;

    if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess
        temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
        printf("%s\n", prompt);
        fflush(stdout);
        for (mat_row = 0; mat_row < n; mat_row++) {
            /* global row mat_row belongs to grid row mat_row / n_bar */
            grid_row = mat_row/Order(local_A);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &dest);
                if (dest == 0) {
                    /* destination is process 0 itself: store directly
                       (only reached while grid_row == 0, so mat_row < n_bar) */
                    for (mat_col = 0; mat_col < Order(local_A); mat_col++)
                        scanf("%lf",
                            (local_A->entries)+mat_row*Order(local_A)+mat_col);
                } else {
                    /* stage one row segment, then ship it to its owner */
                    for(mat_col = 0; mat_col < Order(local_A); mat_col++)
                        scanf("%lf", temp + mat_col);
                    MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0,
                        grid->comm);
                }
            }
        }
        free(temp);
    } else { // Other processess receive matrix from process 0
        /* receive the local block one row at a time, in the same order
           process 0 sends it */
        for (mat_row = 0; mat_row < Order(local_A); mat_row++)
            MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
                FLOAT_MPI, 0, 0, grid->comm, &status);
    }
} /* Read_matrix */
/*********************************************************/
/* Read and distribute matrix for local matrix B's transpose:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
/* Same distribution scheme as Read_matrix_A, but the local block of B is
 * stored transposed ("column major"): the k-th row segment received (or
 * read) becomes column k of the local block, so that the multiply kernel
 * can access B contiguously.
 * NOTE(review): "%lf" assumes FLOAT is double -- confirm against the
 * FLOAT typedef / FLOAT_MPI; scanf results are unchecked. */
void Read_matrix_B(
        char*            prompt   /* in  */,
        LOCAL_MATRIX_T*  local_B  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {
    int        mat_row, mat_col;
    int        grid_row, grid_col;
    int        dest;
    int        coords[2];
    FLOAT      *temp;   /* staging buffer: one n_bar-long row segment */
    MPI_Status status;

    if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        printf("%s\n", prompt);
        fflush(stdout);
        for (mat_row = 0; mat_row < n; mat_row++) {
            /* global row mat_row belongs to grid row mat_row / n_bar */
            grid_row = mat_row/Order(local_B);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &dest);
                if (dest == 0) { // process 0 (local)
                    /* store the row transposed: element (row, col) of the
                       global block goes to slot col*n_bar + row */
                    for (mat_col = 0; mat_col < Order(local_B); mat_col++)
                        scanf("%lf",
                            (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage
                    /* scanf("%lf",
                        (local_A->entries)+mat_row*Order(local_A)+mat_col); */
                } else {
                    /* send the row untransposed; the receiver transposes */
                    for(mat_col = 0; mat_col < Order(local_B); mat_col++)
                        scanf("%lf", temp + mat_col);
                    MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0,
                        grid->comm);
                }
            }
        }
        free(temp);
    } else { // Other processess receive matrix from process 0
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage
        /* the k-th received row segment becomes column k of local_B */
        for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
            MPI_Recv(temp, Order(local_B),
                FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in local_B, for column major storage
            for(mat_row = 0; mat_row < Order(local_B); mat_row++)
                Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage
            /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
                FLOAT_MPI, 0, 0, grid->comm, &status); */
        }
        free(temp);
    }
} /* Read_matrix_B */
/*********************************************************/
/* Recive and Print Matrix:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
/* Process 0 prints the global matrix row by row, pulling each n_bar-long
 * row segment from the grid process that owns it; every other process
 * sends its rows in the matching order.  Send/receive order must stay
 * in lock-step with the loop structure below. */
void Print_matrix(
        char*            title    /* in  */,
        LOCAL_MATRIX_T*  local_A  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {
    int        mat_row, mat_col;
    int        grid_row, grid_col;
    int        source;
    int        coords[2];
    FLOAT*     temp;    /* receive buffer: one n_bar-long row segment */
    MPI_Status status;

    if (grid->my_rank == 0) {
        temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
        printf("%s\n", title);
        for (mat_row = 0; mat_row < n; mat_row++) {
            /* global row mat_row lives in grid row mat_row / n_bar */
            grid_row = mat_row/Order(local_A);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &source);
                if (source == 0) {
                    /* own block: print directly */
                    for(mat_col = 0; mat_col < Order(local_A); mat_col++)
                        printf("%20.15E ", Entry(local_A, mat_row, mat_col));
                } else {
                    /* fetch the owner's row segment, then print it */
                    MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0,
                        grid->comm, &status);
                    for(mat_col = 0; mat_col < Order(local_A); mat_col++)
                        printf("%20.15E ", temp[mat_col]);
                }
            }
            printf("\n");
        }
        free(temp);
    } else {
        /* send the local block one row at a time, matching the order in
           which process 0 posts its receives */
        for (mat_row = 0; mat_row < Order(local_A); mat_row++)
            MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A),
                FLOAT_MPI, 0, 0, grid->comm);
    }
} /* Print_matrix */
/*********************************************************/
/* Recive and Print Matrix for local matrix B's transpose:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
/* Print variant for B, whose local block is stored transposed: senders
 * un-transpose each row into a staging buffer before sending, and
 * process 0 reads its own block with swapped indices, so the printed
 * output is the ordinary (untransposed) global matrix. */
void Print_matrix_B(
        char*            title    /* in  */,
        LOCAL_MATRIX_T*  local_B  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {
    int        mat_row, mat_col;
    int        grid_row, grid_col;
    int        source;
    int        coords[2];
    FLOAT*     temp;    /* staging buffer: one n_bar-long row segment */
    MPI_Status status;

    if (grid->my_rank == 0) {
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        printf("%s\n", title);
        for (mat_row = 0; mat_row < n; mat_row++) {
            /* global row mat_row lives in grid row mat_row / n_bar */
            grid_row = mat_row/Order(local_B);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &source);
                if (source == 0) {
                    /* own block is stored transposed: swap the indices */
                    for(mat_col = 0; mat_col < Order(local_B); mat_col++)
                        printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage
                    // printf("%20.15E ", Entry(local_A, mat_row, mat_col));
                } else {
                    /* remote segments arrive already un-transposed */
                    MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0,
                        grid->comm, &status);
                    for(mat_col = 0; mat_col < Order(local_B); mat_col++)
                        printf("%20.15E ", temp[mat_col]);
                }
            }
            printf("\n");
        }
        free(temp);
    } else {
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        /* un-transpose column mat_col into temp, then send it as a row */
        for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
            for(mat_row = 0; mat_row < Order(local_B); mat_row++)
                *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage
            MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm);
        }
        free(temp);
    }
} /* Print_matrix_B */
/*********************************************************/
/*
* Set local matrix's element to zero
*/
/* Reset every entry of the local block to zero. */
void Set_to_zero(
        LOCAL_MATRIX_T*  local_A  /* out */) {
    int row, col;

    /* all writes are independent, so traversal order is irrelevant */
    for (col = 0; col < Order(local_A); col++) {
        for (row = 0; row < Order(local_A); row++) {
            Entry(local_A,row,col) = 0.0E0;
        }
    }
} /* Set_to_zero */
/*********************************************************/
/* Build the derived MPI datatype local_matrix_mpi_t describing one
 * LOCAL_MATRIX_T: the int n_bar field followed by the n_bar x n_bar
 * block of entries, each at its actual byte offset inside the struct.
 * Must be called (once) before any communication using
 * local_matrix_mpi_t.
 *
 * Uses MPI_Get_address / MPI_Type_create_struct -- the replacements for
 * MPI_Address / MPI_Type_struct, which were deprecated in MPI-2 and
 * removed in MPI-3, so the old calls fail with current MPI libraries. */
void Build_matrix_type(
        LOCAL_MATRIX_T*  local_A  /* in */) {
    MPI_Datatype  temp_mpi_t;
    int           block_lengths[2];
    MPI_Aint      displacements[2];
    MPI_Datatype  typelist[2];
    MPI_Aint      start_address;
    MPI_Aint      address;

    /* contiguous run of Order^2 matrix entries */
    MPI_Type_contiguous(Order(local_A)*Order(local_A),
        FLOAT_MPI, &temp_mpi_t);

    block_lengths[0] = block_lengths[1] = 1;
    typelist[0] = MPI_INT;
    typelist[1] = temp_mpi_t;

    /* byte offsets of n_bar and entries relative to the struct base */
    MPI_Get_address(local_A, &start_address);
    MPI_Get_address(&(local_A->n_bar), &address);
    displacements[0] = address - start_address;
    MPI_Get_address(local_A->entries, &address);
    displacements[1] = address - start_address;

    MPI_Type_create_struct(2, block_lengths, displacements,
        typelist, &local_matrix_mpi_t);
    MPI_Type_commit(&local_matrix_mpi_t);

    /* the intermediate contiguous type is no longer needed once the
       struct type has been constructed (fix: it was never freed) */
    MPI_Type_free(&temp_mpi_t);
} /* Build_matrix_type */
/*********************************************************/
/* local matrix multiplication function
* withing OpenMP Thread Acceleration
*/
/* local_C += local_A * B for the local blocks, with the row loop split
 * across NUM_THREADS OpenMP threads.  local_B is stored transposed, so
 * Entry(local_B,j,k) walks memory contiguously in k and the inner
 * product is cache friendly: C(i,j) += sum_k A(i,k) * B^T(j,k). */
void Local_matrix_multiply(
        LOCAL_MATRIX_T*  local_A  /* in  */,
        LOCAL_MATRIX_T*  local_B  /* in  */,
        LOCAL_MATRIX_T*  local_C  /* out */) {
    int i, j, k;
    // int my_rank;
    // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split
    for (i = 0; i < Order(local_A); i++) {
        // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num());
        for (j = 0; j < Order(local_A); j++) {
            /* accumulate in a scalar instead of re-reading and re-writing
               Entry(local_C,i,j) on every k iteration; the addition order
               is unchanged, so the floating-point result is identical */
            FLOAT sum = Entry(local_C,i,j);
            for (k = 0; k < Order(local_B); k++)
                sum += Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k)
            Entry(local_C,i,j) = sum;
        }
    }
} /* Local_matrix_multiply */
/*********************************************************/
/* Recive and Print Local Matrix:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
/* Process 0 prints its own local block, then receives and prints every
 * other rank's whole LOCAL_MATRIX_T (via local_matrix_mpi_t) in rank
 * order; other ranks just send their block.
 * NOTE(review): temp_mat is a file-scope scratch matrix declared
 * elsewhere in this file -- not visible here. */
void Print_local_matrices(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_A  /* in */,
        GRID_INFO_T*     grid     /* in */) {
    int        coords[2];
    int        i, j;
    int        source;
    MPI_Status status;

    // print by process No.0 in process mesh
    if (grid->my_rank == 0) {
        printf("%s\n", title);
        printf("Process %d > grid_row = %d, grid_col = %d\n",
            grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_A); i++) {
            for (j = 0; j < Order(local_A); j++)
                printf("%20.15E ", Entry(local_A,i,j));
            printf("\n");
        }
        /* then each remote rank's block, labelled with its grid coords */
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            printf("Process %d > grid_row = %d, grid_col = %d\n",
                source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    printf("%20.15E ", Entry(temp_mat,i,j));
                printf("\n");
            }
        }
        fflush(stdout);
    } else {
        MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
} /* Print_local_matrices */
/*********************************************************/
/* Recive and Print Local Matrix for local matrix B's transpose:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
/* Variant of Print_local_matrices for B's transposed local storage:
 * every print swaps the indices (Entry(...,j,i)) so the output shows
 * the untransposed block.
 * NOTE(review): temp_mat is a file-scope scratch matrix declared
 * elsewhere in this file -- not visible here. */
void Print_local_matrices_B(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_B  /* in */,
        GRID_INFO_T*     grid     /* in */) {
    int        coords[2];
    int        i, j;
    int        source;
    MPI_Status status;

    // print by process No.0 in process mesh
    if (grid->my_rank == 0) {
        printf("%s\n", title);
        printf("Process %d > grid_row = %d, grid_col = %d\n",
            grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_B); i++) {
            for (j = 0; j < Order(local_B); j++)
                printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage
            printf("\n");
        }
        /* then each remote rank's block, labelled with its grid coords */
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            printf("Process %d > grid_row = %d, grid_col = %d\n",
                source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage
                printf("\n");
            }
        }
        fflush(stdout);
    } else {
        MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
} /* Print_local_matrices_B */
integration_point_to_node_transformation_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Michael Andre, https://github.com/msandre
//
#if !defined(KRATOS_INTEGRATION_POINT_TO_NODE_TRANSFORMATION_UTILITY_H_INCLUDED)
#define KRATOS_INTEGRATION_POINT_TO_NODE_TRANSFORMATION_UTILITY_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/element.h"
#include "includes/model_part.h"
#include "utilities/openmp_utils.h"
// Application includes
#include "fluid_dynamics_application_variables.h"
namespace Kratos
{
///@addtogroup FluidDynamicsApplication
///@{
///@name Kratos Classes
///@{
/**
* @brief A utility for transforming values on integration points to nodes.
*
* This utility was created to transform vorticity and q-criterion variables
* from the integration points where they are computed to the nodes for
* visualization. The utility is designed to work in both 2D and 3D with and
* without the MPI library. Each nodal value is computed as a weighted average
* of the neighboring elements.
*/
template<unsigned int TDim, unsigned int TNumNodes = TDim + 1>
class IntegrationPointToNodeTransformationUtility {
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of IntegrationPointToNodeTransformationUtility
    KRATOS_CLASS_POINTER_DEFINITION(IntegrationPointToNodeTransformationUtility);

    /**
     * @brief Project a variable stored on element integration points to the nodes.
     *
     * Three phases: (1) zero the nodal variable and NODAL_AREA, (2) add each
     * element's first-integration-point value to its nodes, weighted by
     * Volume()/TNumNodes, (3) assemble across MPI partitions and divide by
     * the accumulated weight, giving a weighted average over the
     * neighbouring elements.
     *
     * @param rVariable  variable to transform (must be a solution-step variable)
     * @param rModelPart model part whose elements/nodes are processed
     */
    template<class TVariableType>
    void TransformFromIntegrationPointsToNodes(const Variable<TVariableType>& rVariable,
                                               ModelPart& rModelPart) const
    {
        // Phase 1: reset the nodal accumulators.
        #pragma omp parallel
        {
            ModelPart::NodeIterator NodesBegin;
            ModelPart::NodeIterator NodesEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
            {
                itNode->FastGetSolutionStepValue(rVariable) = rVariable.Zero();
                itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
            }
        }
        // Hoisted out of the element loop: the process info is loop-invariant.
        const auto& r_process_info = rModelPart.GetProcessInfo();
        // Phase 2: accumulate the weighted element contributions on the nodes.
        #pragma omp parallel
        {
            ModelPart::ElementIterator ElemBegin;
            ModelPart::ElementIterator ElemEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd);
            std::vector<TVariableType> ValuesOnIntPoint;
            for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
            {
                itElem->CalculateOnIntegrationPoints(rVariable,ValuesOnIntPoint,r_process_info);
                Element::GeometryType& rGeom = itElem->GetGeometry();
                const double Weight = rGeom.Volume() / static_cast<double>(TNumNodes);
                for (unsigned int iNode = 0; iNode < rGeom.size(); iNode++)
                {
                    // Nodes are shared between elements/threads: lock per node.
                    // Only the value at the first integration point is used.
                    rGeom[iNode].SetLock();
                    rGeom[iNode].FastGetSolutionStepValue(rVariable) += Weight * ValuesOnIntPoint[0];
                    rGeom[iNode].FastGetSolutionStepValue(NODAL_AREA) += Weight;
                    rGeom[iNode].UnSetLock();
                }
            }
        }
        // Phase 3: sum the partial contributions across MPI partitions.
        rModelPart.GetCommunicator().AssembleCurrentData(rVariable);
        rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
        // Divide by the accumulated weight to obtain the weighted average.
        // NOTE(review): assumes every node has at least one neighbouring
        // element, otherwise NODAL_AREA is zero here -- confirm.
        #pragma omp parallel
        {
            ModelPart::NodeIterator NodesBegin;
            ModelPart::NodeIterator NodesEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
            {
                const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA);
                itNode->FastGetSolutionStepValue(rVariable) /= NodalArea;
            }
        }
    }
}; // class IntegrationPointToNodalDataTransformationUtility
///@}
///@} // Fluid Dynamics Application group
} // namespace Kratos
#endif // KRATOS_INTEGRATION_POINT_TO_NODAL_DATA_TRANSFORMATION_UTILITY_H_INCLUDED defined
|
gbdt.h | #ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/json11.hpp>
#include "score_updater.hpp"
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>
using namespace json11;
namespace LightGBM {
/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Merge model from other boosting object. Will insert to the front of current boosting object
* \param other
*/
void MergeFrom(const Boosting* other) override {
auto other_gbdt = reinterpret_cast<const GBDT*>(other);
// tmp move to other vector
auto original_models = std::move(models_);
models_ = std::vector<std::unique_ptr<Tree>>();
// push model from other first
for (const auto& tree : other_gbdt->models_) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// push model in current object
for (const auto& tree : original_models) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
void ShuffleModels(int start_iter, int end_iter) override {
int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iter = std::max(0, start_iter);
if (end_iter <= 0) {
end_iter = total_iter;
}
end_iter = std::min(total_iter, end_iter);
auto original_models = std::move(models_);
std::vector<int> indices(total_iter);
for (int i = 0; i < total_iter; ++i) {
indices[i] = i;
}
Random tmp_rand(17);
for (int i = start_iter; i < end_iter - 1; ++i) {
int j = tmp_rand.NextShort(i + 1, end_iter);
std::swap(indices[i], indices[j]);
}
models_ = std::vector<std::unique_ptr<Tree>>();
for (int i = 0; i < total_iter; ++i) {
for (int j = 0; j < num_tree_per_iteration_; ++j) {
int tree_idx = indices[i] * num_tree_per_iteration_ + j;
auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
models_.push_back(std::move(new_tree));
}
}
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequence of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
virtual bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
virtual const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
virtual int64_t GetNumPredictAt(int data_idx) const override {
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
return num_data * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction for one data
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
int num_preb_in_one_row = num_class_;
if (is_pred_leaf) {
int max_iteration = GetCurrentIteration();
if (num_iteration > 0) {
num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration));
} else {
num_preb_in_one_row *= max_iteration;
}
} else if (is_pred_contrib) {
num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
}
return num_preb_in_one_row;
}
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
virtual bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \return Non-empty string if succeeded
*/
virtual std::string SaveModelToString(int start_iteration, int num_iterations) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
if (num_iteration > 0) {
num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
}
if (is_pred_contrib) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
models_[i]->RecomputeMaxDepth();
}
}
}
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
virtual const char* SubModelName() const override { return "tree"; }
protected:
/*!
* \brief Print eval result and check early stopping
*/
bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current interation
*/
virtual void Bagging(int iter);
/*!
* \brief Helper function for bagging, used for multi-threading optimization
* \param start start indice of bagging
* \param cnt count
* \param buffer output buffer
* \return count of left size
*/
data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current interation
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
/*! \brief First order derivative of training data */
std::vector<score_t> gradients_;
/*! \brief Secend order derivative of training data */
std::vector<score_t> hessians_;
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t> tmp_indices_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
/*! \brief number of threads */
int num_threads_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> offsets_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> left_cnts_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> right_cnts_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> left_write_pos_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> right_write_pos_buf_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
std::string loaded_parameter_;
Json forced_splits_json_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
// Apply the block (3-component) variable-coefficient elliptic operator on a
// list of hexahedral spectral elements at polynomial order N = 3
// (p_Nq nodes per direction, p_Np = p_Nq^3 nodes per element).
// Per component c and per node:
//   Aq_c = D^T ( lambda0_c * G * D q_c ) + GwJ * lambda1_c * q_c
// i.e. a stiffness term scaled by the first coefficient plus a weighted mass
// term scaled by the second coefficient.
//
// Arguments:
//   Nelements   - number of elements referenced by elementList
//   offset      - stride separating the 3 solution components in q/Aq and the
//                 two coefficient groups in lambda (q[base + c * offset])
//   loffset     - stride separating per-component coefficient fields inside
//                 lambda (lambda[id + k * offset + c * loffset])
//   elementList - indices of the elements to process
//   ggeo        - geometric factors, p_Nggeo entries per node
//                 (G00, G01, G02, G11, G12, G22 and the weighted Jacobian GwJ)
//   D           - 1-D differentiation matrix, p_Nq x p_Nq (used in the
//                 "test function" stage below)
//   S           - second 1-D operator used in the gradient stage
//                 (presumably D transposed -- confirm against the generator)
//   lambda      - variable coefficients, laid out as described above
//   q           - input vector, 3 components
//   Aq          - output vector, 3 components (overwritten)
extern "C" void FUNC(ellipticBlockPartialAxCoeffHex3D_N3)(const dlong & Nelements,
                                                          const dlong & offset,
                                                          const dlong & loffset,
                                                          const dlong* __restrict__ elementList,
                                                          const dfloat* __restrict__ ggeo,
                                                          const dfloat* __restrict__ D,
                                                          const dfloat* __restrict__ S,
                                                          const dfloat* __restrict__ lambda,
                                                          const dfloat* __restrict__ q,
                                                          dfloat* __restrict__ Aq )
{
  // Element-local scratch: the 3 solution components and the coefficient-
  // scaled "gradient" fields in reference (r,s,t) directions.
  dfloat s_q[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqr[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqs[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqt[3][p_Nq][p_Nq][p_Nq];

  // Cache the two 1-D operators in stack arrays for fast repeated access.
  dfloat s_D[p_Nq][p_Nq];
  dfloat s_S[p_Nq][p_Nq];
  for(int j = 0; j < p_Nq; ++j)
    for(int i = 0; i < p_Nq; ++i) {
      s_D[j][i] = D[j * p_Nq + i];
      s_S[j][i] = S[j * p_Nq + i];
    }

#ifdef __NEKRS__OMP__
  // The scratch arrays are per-element and must be thread-private.
  #pragma omp parallel for private(s_q, s_Gqr, s_Gqs, s_Gqt)
#endif
  for(dlong e = 0; e < Nelements; ++e) {
    const dlong element = elementList[e];

    // Gather the 3 components of q for this element into local storage.
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong base = i + j * p_Nq + k * p_Nq * p_Nq + element * p_Np;
          s_q[0][k][j][i] = q[base + 0 * offset];
          s_q[1][k][j][i] = q[base + 1 * offset];
          s_q[2][k][j][i] = q[base + 2 * offset];
        }

    // Stage 1: reference-space gradient (via S), contraction with the
    // geometric factors, and scaling by the first coefficient lambda0_c.
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_G00 = ggeo[gbase + p_G00ID * p_Np];
          const dfloat r_G01 = ggeo[gbase + p_G01ID * p_Np];
          const dfloat r_G11 = ggeo[gbase + p_G11ID * p_Np];
          const dfloat r_G12 = ggeo[gbase + p_G12ID * p_Np];
          const dfloat r_G02 = ggeo[gbase + p_G02ID * p_Np];
          const dfloat r_G22 = ggeo[gbase + p_G22ID * p_Np];
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          // First coefficient group (k = 0), one value per component.
          const dfloat r_lam00 = lambda[id + 0 * offset + 0 * loffset];
          const dfloat r_lam10 = lambda[id + 0 * offset + 1 * loffset];
          const dfloat r_lam20 = lambda[id + 0 * offset + 2 * loffset];
          dfloat qr0 = 0.f, qr1 = 0.f, qr2 = 0.f;
          dfloat qs0 = 0.f, qs1 = 0.f, qs2 = 0.f;
          dfloat qt0 = 0.f, qt1 = 0.f, qt2 = 0.f;
          // 1-D derivative in each of the r, s, t directions, all 3 components.
          for(int m = 0; m < p_Nq; m++) {
            qr0 += s_S[m][i] * s_q[0][k][j][m];
            qs0 += s_S[m][j] * s_q[0][k][m][i];
            qt0 += s_S[m][k] * s_q[0][m][j][i];
            //
            qr1 += s_S[m][i] * s_q[1][k][j][m];
            qs1 += s_S[m][j] * s_q[1][k][m][i];
            qt1 += s_S[m][k] * s_q[1][m][j][i];
            qr2 += s_S[m][i] * s_q[2][k][j][m];
            qs2 += s_S[m][j] * s_q[2][k][m][i];
            qt2 += s_S[m][k] * s_q[2][m][j][i];
          }
          // Multiply the gradient by the symmetric geometric-factor matrix G.
          dfloat Gqr0 = r_G00 * qr0 + r_G01 * qs0 + r_G02 * qt0;
          dfloat Gqs0 = r_G01 * qr0 + r_G11 * qs0 + r_G12 * qt0;
          dfloat Gqt0 = r_G02 * qr0 + r_G12 * qs0 + r_G22 * qt0;
          dfloat Gqr1 = r_G00 * qr1 + r_G01 * qs1 + r_G02 * qt1;
          dfloat Gqs1 = r_G01 * qr1 + r_G11 * qs1 + r_G12 * qt1;
          dfloat Gqt1 = r_G02 * qr1 + r_G12 * qs1 + r_G22 * qt1;
          dfloat Gqr2 = r_G00 * qr2 + r_G01 * qs2 + r_G02 * qt2;
          dfloat Gqs2 = r_G01 * qr2 + r_G11 * qs2 + r_G12 * qt2;
          dfloat Gqt2 = r_G02 * qr2 + r_G12 * qs2 + r_G22 * qt2;
          s_Gqr[0][k][j][i] = r_lam00 * Gqr0;
          s_Gqs[0][k][j][i] = r_lam00 * Gqs0;
          s_Gqt[0][k][j][i] = r_lam00 * Gqt0;
          s_Gqr[1][k][j][i] = r_lam10 * Gqr1;
          s_Gqs[1][k][j][i] = r_lam10 * Gqs1;
          s_Gqt[1][k][j][i] = r_lam10 * Gqt1;
          s_Gqr[2][k][j][i] = r_lam20 * Gqr2;
          s_Gqs[2][k][j][i] = r_lam20 * Gqs2;
          s_Gqt[2][k][j][i] = r_lam20 * Gqt2;
        }

    // Stage 2: apply the transposed derivative (via D) to the scaled gradient
    // fields, add the GwJ-weighted mass term, and write the result.
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_GwJ = ggeo[gbase + p_GWJID * p_Np];
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          // Second coefficient group (k = 1), one value per component.
          const dfloat r_lam01 = lambda[id + 1 * offset + 0 * loffset];
          const dfloat r_lam11 = lambda[id + 1 * offset + 1 * loffset];
          const dfloat r_lam21 = lambda[id + 1 * offset + 2 * loffset];
          // Mass contribution: GwJ * lambda1_c * q_c.
          dfloat r_Aq0 = r_GwJ * r_lam01 * s_q[0][k][j][i];
          dfloat r_Aq1 = r_GwJ * r_lam11 * s_q[1][k][j][i];
          dfloat r_Aq2 = r_GwJ * r_lam21 * s_q[2][k][j][i];
          dfloat r_Aqr0 = 0.f, r_Aqs0 = 0.f, r_Aqt0 = 0.f;
          dfloat r_Aqr1 = 0.f, r_Aqs1 = 0.f, r_Aqt1 = 0.f;
          dfloat r_Aqr2 = 0.f, r_Aqs2 = 0.f, r_Aqt2 = 0.f;
          for(int m = 0; m < p_Nq; m++) {
            r_Aqr0 += s_D[m][i] * s_Gqr[0][k][j][m];
            r_Aqr1 += s_D[m][i] * s_Gqr[1][k][j][m];
            r_Aqr2 += s_D[m][i] * s_Gqr[2][k][j][m];
          }
          for(int m = 0; m < p_Nq; m++) {
            r_Aqs0 += s_D[m][j] * s_Gqs[0][k][m][i];
            r_Aqs1 += s_D[m][j] * s_Gqs[1][k][m][i];
            r_Aqs2 += s_D[m][j] * s_Gqs[2][k][m][i];
          }
          for(int m = 0; m < p_Nq; m++) {
            r_Aqt0 += s_D[m][k] * s_Gqt[0][m][j][i];
            r_Aqt1 += s_D[m][k] * s_Gqt[1][m][j][i];
            r_Aqt2 += s_D[m][k] * s_Gqt[2][m][j][i];
          }
          Aq[id + 0 * offset] = r_Aqr0 + r_Aqs0 + r_Aqt0 + r_Aq0;
          Aq[id + 1 * offset] = r_Aqr1 + r_Aqs1 + r_Aqt1 + r_Aq1;
          Aq[id + 2 * offset] = r_Aqr2 + r_Aqs2 + r_Aqt2 + r_Aq2;
        }
  }
}
|
convolution_3x3_pack1ton_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 3x3 stride-1 convolution, fp16 storage and fp16 arithmetic, RISC-V Vector.
// Input layout is pack1 (one scalar channel per plane); output is packn-packed,
// i.e. each output "pixel" is a vector of packn output channels.
// The kernel blob holds, per output-channel group, inch consecutive 3x3 tap
// blocks of packn fp16 values each (9 * packn per input channel).
static void conv3x3s1_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Number of fp16 lanes per vector register: vlenb bytes / 2 bytes per fp16.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const __fp16* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // Seed the whole output channel with its bias (zero when bias is null),
        // then accumulate each input channel's contribution on top.
        vfloat16m1_t _bias0 = bias ? vle16_v_f16m1(bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl);
        out0.fill(_bias0);

        const __fp16* k0 = kernel.channel(p);

        int q = 0;
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            // Three consecutive input rows feed one output row (3x3 window).
            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);

            // The nine kernel taps, each a vector of packn output channels.
            vfloat16m1_t _k00 = vle16_v_f16m1(k0, vl);
            vfloat16m1_t _k01 = vle16_v_f16m1(k0 + packn, vl);
            vfloat16m1_t _k02 = vle16_v_f16m1(k0 + packn * 2, vl);
            vfloat16m1_t _k10 = vle16_v_f16m1(k0 + packn * 3, vl);
            vfloat16m1_t _k11 = vle16_v_f16m1(k0 + packn * 4, vl);
            vfloat16m1_t _k12 = vle16_v_f16m1(k0 + packn * 5, vl);
            vfloat16m1_t _k20 = vle16_v_f16m1(k0 + packn * 6, vl);
            vfloat16m1_t _k21 = vle16_v_f16m1(k0 + packn * 7, vl);
            vfloat16m1_t _k22 = vle16_v_f16m1(k0 + packn * 8, vl);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // Main tile: 8 output pixels per iteration; each scalar input
                // sample is broadcast-multiplied against a vector of taps.
                for (; j + 7 < outw; j += 8)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
                    vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);
                    vfloat16m1_t _sum4 = vle16_v_f16m1(outptr0 + packn * 4, vl);
                    vfloat16m1_t _sum5 = vle16_v_f16m1(outptr0 + packn * 5, vl);
                    vfloat16m1_t _sum6 = vle16_v_f16m1(outptr0 + packn * 6, vl);
                    vfloat16m1_t _sum7 = vle16_v_f16m1(outptr0 + packn * 7, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[1], _k00, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[2], _k00, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[3], _k00, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[4], _k00, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[5], _k00, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[6], _k00, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[7], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k01, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[3], _k01, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[4], _k01, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[5], _k01, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[6], _k01, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[7], _k01, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[8], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k02, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k02, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[5], _k02, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[6], _k02, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[7], _k02, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[8], _k02, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[9], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[1], _k10, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[2], _k10, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[3], _k10, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[4], _k10, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[5], _k10, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[6], _k10, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[7], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k11, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[3], _k11, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[4], _k11, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[5], _k11, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[6], _k11, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[7], _k11, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[8], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k12, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k12, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[5], _k12, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[6], _k12, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[7], _k12, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[8], _k12, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[9], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[1], _k20, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[2], _k20, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[3], _k20, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[4], _k20, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[5], _k20, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[6], _k20, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[7], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k21, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[3], _k21, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[4], _k21, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[5], _k21, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[6], _k21, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[7], _k21, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[8], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k22, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k22, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[5], _k22, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[6], _k22, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[7], _k22, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[8], _k22, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[9], _k22, vl);
                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
                    vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
                    vse16_v_f16m1(outptr0 + packn * 4, _sum4, vl);
                    vse16_v_f16m1(outptr0 + packn * 5, _sum5, vl);
                    vse16_v_f16m1(outptr0 + packn * 6, _sum6, vl);
                    vse16_v_f16m1(outptr0 + packn * 7, _sum7, vl);
                    outptr0 += packn * 8;
                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                }
                // Tail tile: 4 output pixels.
                for (; j + 3 < outw; j += 4)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
                    vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[1], _k00, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[2], _k00, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[3], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k01, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[3], _k01, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[4], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k02, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k02, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[5], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[1], _k10, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[2], _k10, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[3], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k11, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[3], _k11, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[4], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k12, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k12, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[5], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[1], _k20, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[2], _k20, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[3], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k21, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[3], _k21, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[4], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k22, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k22, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[5], _k22, vl);
                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
                    vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
                    outptr0 += packn * 4;
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                }
                // Tail tile: 2 output pixels.
                for (; j + 1 < outw; j += 2)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[1], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[1], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[1], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k22, vl);
                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    outptr0 += packn * 2;
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                }
                // Remainder: one output pixel at a time.
                for (; j < outw; j++)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    vse16_v_f16m1(outptr0, _sum0, vl);
                    outptr0 += packn;
                    r0 += 1;
                    r1 += 1;
                    r2 += 1;
                }

                // Skip the 2 right-border input pixels to reach the next row
                // (presumably the input width is outw + 2 -- confirm with the
                // caller's padding logic).
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            // Advance to the 3x3 tap block of the next input channel.
            k0 += 9 * packn;
        }
    }
}
// 3x3 stride-2 convolution, fp16 storage and fp16 arithmetic, RISC-V Vector.
// Input layout is pack1 (one scalar channel per plane); output is packn-packed.
// Same tap/accumulator structure as the stride-1 variant, but consecutive
// output pixels read input samples two apart.
static void conv3x3s2_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Number of fp16 lanes per vector register: vlenb bytes / 2 bytes per fp16.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After consuming 2 * outw input pixels of a row, jump over the row
    // remainder plus one full row (stride 2 skips every other input row).
    const int tailstep = w - 2 * outw + w;

    const __fp16* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // Seed the whole output channel with its bias (zero when bias is null),
        // then accumulate each input channel's contribution on top.
        vfloat16m1_t _bias0 = bias ? vle16_v_f16m1(bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl);
        out0.fill(_bias0);

        const __fp16* k0 = kernel.channel(p);

        int q = 0;
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            // Three consecutive input rows feed one output row (3x3 window).
            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);

            // The nine kernel taps, each a vector of packn output channels.
            vfloat16m1_t _k00 = vle16_v_f16m1(k0, vl);
            vfloat16m1_t _k01 = vle16_v_f16m1(k0 + packn, vl);
            vfloat16m1_t _k02 = vle16_v_f16m1(k0 + packn * 2, vl);
            vfloat16m1_t _k10 = vle16_v_f16m1(k0 + packn * 3, vl);
            vfloat16m1_t _k11 = vle16_v_f16m1(k0 + packn * 4, vl);
            vfloat16m1_t _k12 = vle16_v_f16m1(k0 + packn * 5, vl);
            vfloat16m1_t _k20 = vle16_v_f16m1(k0 + packn * 6, vl);
            vfloat16m1_t _k21 = vle16_v_f16m1(k0 + packn * 7, vl);
            vfloat16m1_t _k22 = vle16_v_f16m1(k0 + packn * 8, vl);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // Main tile: 8 output pixels; input samples step by 2 (stride).
                for (; j + 7 < outw; j += 8)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
                    vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);
                    vfloat16m1_t _sum4 = vle16_v_f16m1(outptr0 + packn * 4, vl);
                    vfloat16m1_t _sum5 = vle16_v_f16m1(outptr0 + packn * 5, vl);
                    vfloat16m1_t _sum6 = vle16_v_f16m1(outptr0 + packn * 6, vl);
                    vfloat16m1_t _sum7 = vle16_v_f16m1(outptr0 + packn * 7, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k00, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k00, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[6], _k00, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[8], _k00, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[10], _k00, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[12], _k00, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[14], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k01, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[5], _k01, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[7], _k01, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[9], _k01, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[11], _k01, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[13], _k01, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[15], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[4], _k02, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[6], _k02, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[8], _k02, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[10], _k02, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[12], _k02, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[14], _k02, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[16], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k10, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k10, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[6], _k10, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[8], _k10, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[10], _k10, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[12], _k10, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[14], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k11, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[5], _k11, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[7], _k11, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[9], _k11, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[11], _k11, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[13], _k11, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[15], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[4], _k12, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[6], _k12, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[8], _k12, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[10], _k12, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[12], _k12, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[14], _k12, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[16], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k20, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k20, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[6], _k20, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[8], _k20, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[10], _k20, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[12], _k20, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[14], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k21, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[5], _k21, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[7], _k21, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[9], _k21, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[11], _k21, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[13], _k21, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[15], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[4], _k22, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[6], _k22, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[8], _k22, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[10], _k22, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[12], _k22, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[14], _k22, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[16], _k22, vl);
                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
                    vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
                    vse16_v_f16m1(outptr0 + packn * 4, _sum4, vl);
                    vse16_v_f16m1(outptr0 + packn * 5, _sum5, vl);
                    vse16_v_f16m1(outptr0 + packn * 6, _sum6, vl);
                    vse16_v_f16m1(outptr0 + packn * 7, _sum7, vl);
                    outptr0 += packn * 8;
                    r0 += 16;
                    r1 += 16;
                    r2 += 16;
                }
                // Tail tile: 4 output pixels.
                for (; j + 3 < outw; j += 4)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
                    vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k00, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k00, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[6], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k01, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[5], _k01, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[7], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[4], _k02, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[6], _k02, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[8], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k10, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k10, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[6], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k11, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[5], _k11, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[7], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[4], _k12, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[6], _k12, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[8], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k20, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k20, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[6], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k21, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[5], _k21, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[7], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[4], _k22, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[6], _k22, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[8], _k22, vl);
                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
                    vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
                    outptr0 += packn * 4;
                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                }
                // Tail tile: 2 output pixels.
                for (; j + 1 < outw; j += 2)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[4], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[4], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[4], _k22, vl);
                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    outptr0 += packn * 2;
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                }
                // Remainder: one output pixel at a time.
                for (; j < outw; j++)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    vse16_v_f16m1(outptr0, _sum0, vl);
                    outptr0 += packn;
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                }

                // Jump to the start of the next stride-2 input row pair.
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            // Advance to the 3x3 tap block of the next input channel.
            k0 += 9 * packn;
        }
    }
}
|
basis_function.h | /*
Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of
Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the names of VSB - Technical University of Ostrava and Graz
University of Technology nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND
GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @file basis_function.h
* @brief Contains a parent class for all spatial finite element basis
* functions defined on surfaces.
* @note updated documentation
*/
#ifndef INCLUDE_BESTHEA_BASIS_FUNCTION_H_
#define INCLUDE_BESTHEA_BASIS_FUNCTION_H_
#include "besthea/coordinates.h"
#include "besthea/mesh.h"
#include "besthea/settings.h"
#include <array>
#include <vector>
namespace besthea {
namespace bem {
template< class derived_type >
class basis_function;
}
}
/**
* Class representing a basis function.
*/
template< class derived_type >
class besthea::bem::basis_function {
 protected:
  using mesh_type = besthea::mesh::mesh;  //!< Mesh type.

 public:
  /**
   * Default constructor. The basis function is not associated with any mesh.
   */
  basis_function( ) : _mesh( nullptr ) {
  }

  /**
   * Copy construction is disabled.
   */
  basis_function( const basis_function & that ) = delete;

  /**
   * Copy assignment is disabled. (It is already implicitly deleted by the
   * const data member @ref _map; deleted explicitly here so the class states
   * its full special-member policy — Rule of Five.)
   */
  basis_function & operator=( const basis_function & that ) = delete;

  /**
   * Destructor.
   */
  virtual ~basis_function( ) {
  }

  /**
   * Returns this cast to the descendant's type (CRTP static dispatch).
   */
  derived_type * derived( ) {
    return static_cast< derived_type * >( this );
  }

  /**
   * Returns this cast to the descendant's type (CRTP static dispatch).
   */
  const derived_type * derived( ) const {
    return static_cast< const derived_type * >( this );
  }

  /**
   * Returns the number of basis functions supported on a single element.
   */
  virtual lo dimension_local( ) const = 0;

  /**
   * Returns the number of basis functions on the whole mesh.
   */
  virtual lo dimension_global( ) const = 0;

  /**
   * Provides global indices for local contributions.
   * @param[in] i_elem Element index.
   * @param[out] indices Global indices for local contributions.
   * @remark Example: In case of p1 basis functions the global indices of the
   * nodes of the element with index \p i_elem are returned.
   */
  void local_to_global( lo i_elem, std::vector< lo > & indices ) const {
    derived( )->do_local_to_global( i_elem, indices );
  }

  /**
   * Provides global indices for local contributions.
   * @param[in] i_elem Element index.
   * @param[in] n_shared_vertices Number of shared vertices in current elements
   * (regularized quadrature).
   * @param[in] rotation Virtual element rotation (regularized quadrature).
   * @param[in] swap Virtual element inversion (regularized quadrature).
   * @param[out] indices Global indices for local contributions.
   * @note This is an auxiliary routine to return the global indices in a
   * modified order suitable for regularized quadrature.
   */
  void local_to_global( lo i_elem, int n_shared_vertices, int rotation,
    bool swap, std::vector< lo > & indices ) const {
    derived( )->do_local_to_global(
      i_elem, n_shared_vertices, rotation, swap, indices );
  }

  /**
   * Evaluates a basis function in a point in an element. The point is given by
   * coordinates in the reference triangle
   * (\f$ (x_1,x_2) \in (0,1)\times(0,1-x_1) \f$).
   * @param[in] i_elem Element index.
   * @param[in] i_fun Local basis function index.
   * @param[in] x1_ref First coordinate of reference quadrature point.
   * @param[in] x2_ref Second coordinate of reference quadrature point.
   * @param[in] n Outward normal vector on the element.
   */
#pragma omp declare simd uniform( i_elem, i_fun, n ) simdlen( DATA_WIDTH )
  sc evaluate( lo i_elem, lo i_fun, sc x1_ref, sc x2_ref, const sc * n ) const {
    return derived( )->do_evaluate( i_elem, i_fun, x1_ref, x2_ref, n );
  }

  /**
   * Evaluates a basis function in a point in an element. The point is given
   * by coordinates in the reference triangle
   * (\f$ (x_1,x_2) \in (0,1)\times(0,1-x_1) \f$).
   * @param[in] i_elem Element index.
   * @param[in] i_fun Local basis function index.
   * @param[in] x1_ref First coordinate of reference quadrature point.
   * @param[in] x2_ref Second coordinate of reference quadrature point.
   * @param[in] n Outward normal vector on the element.
   * @param[in] n_shared_vertices Number of shared vertices in current elements
   * (regularized quadrature).
   * @param[in] rotation Virtual element rotation (regularized quadrature).
   * @param[in] swap Virtual element inversion (regularized quadrature).
   * @note This is an auxiliary routine to evaluate a basis function suitable
   * for regularized quadrature.
   */
#pragma omp declare simd uniform( \
  i_elem, i_fun, n, n_shared_vertices, rotation, swap ) simdlen( DATA_WIDTH )
  sc evaluate( lo i_elem, lo i_fun, sc x1_ref, sc x2_ref, const sc * n,
    int n_shared_vertices, int rotation, bool swap ) const {
    return derived( )->do_evaluate(
      i_elem, i_fun, x1_ref, x2_ref, n, n_shared_vertices, rotation, swap );
  }

 protected:
  const mesh_type * _mesh;  //!< Pointer to the underlying mesh (not owned).

  const std::array< int, 5 > _map{ 0, 1, 2, 0,
    1 };  //!< Auxiliary array for mapping DOFs under
          // rotation (regularized quadrature).
};
#endif /* INCLUDE_BESTHEA_BASIS_FUNCTION_H_ */
|
GB_unaryop__abs_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint16_uint16
// op(A') function: GB_tran__abs_uint16_uint16
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = abs ((uint16_t) Ax [p]) for all p.  Since the type is unsigned,
// ABS reduces to a plain copy (see the GB_OP macro above: z = x).
GrB_Info GB_unop__abs_uint16_uint16
(
    uint16_t *restrict Cx,          // output array, anz entries
    const uint16_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; caller falls back to
    // the generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs (A') : transpose A while applying the (identity, for unsigned)
// ABS operator.  The actual loop lives in the shared template below, which
// expands using the GB_* macros defined earlier in this file.
GrB_Info GB_tran__abs_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the two-phase transpose template (phase 1 counted rows)
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
util.c | /*-
* Copyright (c) 2012-2017 Ilya Kaliman
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ctype.h>
#include "private.h"
#include "util.h"
int
efp_skip_frag_pair(const struct efp *efp, size_t fr_i_idx, size_t fr_j_idx)
{
	/* Pairs explicitly marked in the skip list never interact. */
	if (efp->skiplist[fr_i_idx * efp->n_frag + fr_j_idx])
		return 1;

	/* Without a distance cutoff every remaining pair interacts. */
	if (!efp->opts.enable_cutoff)
		return 0;

	const struct frag *fr_i = efp->frags + fr_i_idx;
	const struct frag *fr_j = efp->frags + fr_j_idx;
	vec_t sep = vec_sub(CVEC(fr_j->x), CVEC(fr_i->x));

	if (efp->opts.enable_pbc) {
		/* Minimum-image convention: fold the separation into the
		 * central periodic cell. */
		vec_t shift = { efp->box.x * round(sep.x / efp->box.x),
				efp->box.y * round(sep.y / efp->box.y),
				efp->box.z * round(sep.z / efp->box.z) };
		sep = vec_sub(&sep, &shift);
	}

	double rcut = efp->opts.swf_cutoff;

	/* Compare squared lengths to avoid a square root. */
	return vec_len_2(&sep) > rcut * rcut;
}
struct swf
efp_make_swf(const struct efp *efp, const struct frag *fr_i,
const struct frag *fr_j)
{
struct swf swf;
memset(&swf, 0, sizeof(swf));
swf.swf = 1.0;
swf.dr = vec_sub(CVEC(fr_j->x), CVEC(fr_i->x));
if (!efp->opts.enable_cutoff)
return swf;
if (efp->opts.enable_pbc) {
swf.cell.x = efp->box.x * round(swf.dr.x / efp->box.x);
swf.cell.y = efp->box.y * round(swf.dr.y / efp->box.y);
swf.cell.z = efp->box.z * round(swf.dr.z / efp->box.z);
swf.dr.x -= swf.cell.x;
swf.dr.y -= swf.cell.y;
swf.dr.z -= swf.cell.z;
}
double r = vec_len(&swf.dr);
swf.swf = efp_get_swf(r, efp->opts.swf_cutoff);
double dswf = efp_get_dswf(r, efp->opts.swf_cutoff);
swf.dswf.x = -dswf * swf.dr.x;
swf.dswf.y = -dswf * swf.dr.y;
swf.dswf.z = -dswf * swf.dr.z;
return swf;
}
int
efp_check_rotation_matrix(const mat_t *rotmat)
{
	/* A proper rotation matrix has orthonormal columns forming a
	 * right-handed basis.  Returns 1 if rotmat qualifies, 0 otherwise. */
	vec_t col1 = { rotmat->xx, rotmat->yx, rotmat->zx };
	vec_t col2 = { rotmat->xy, rotmat->yy, rotmat->zy };
	vec_t col3 = { rotmat->xz, rotmat->yz, rotmat->zz };

	/* Each column must be a unit vector. */
	if (!eq(vec_len(&col1), 1.0))
		return 0;
	if (!eq(vec_len(&col2), 1.0))
		return 0;
	if (!eq(vec_len(&col3), 1.0))
		return 0;

	/* The first two columns must be orthogonal. */
	if (!eq(vec_dot(&col1, &col2), 0.0))
		return 0;

	/* The third column must equal col1 x col2 (right-handedness). */
	vec_t c12 = vec_cross(&col1, &col2);

	if (!eq(c12.x, col3.x) || !eq(c12.y, col3.y) || !eq(c12.z, col3.z))
		return 0;

	return 1;
}
void
efp_points_to_matrix(const double *pts, mat_t *rotmat)
{
	/* Build a rotation matrix from three points: column 1 points from
	 * p1 to p2, column 2 is p1->p3 orthogonalized against column 1
	 * (Gram-Schmidt), column 3 completes the right-handed frame. */
	vec_t pt1 = { pts[0], pts[1], pts[2] };
	vec_t pt2 = { pts[3], pts[4], pts[5] };
	vec_t pt3 = { pts[6], pts[7], pts[8] };

	vec_t ax1 = vec_sub(&pt2, &pt1);
	vec_t ax2 = vec_sub(&pt3, &pt1);

	vec_normalize(&ax1);
	vec_normalize(&ax2);

	/* Remove the component of ax2 along ax1. */
	double proj = vec_dot(&ax1, &ax2);

	ax2.x -= proj * ax1.x;
	ax2.y -= proj * ax1.y;
	ax2.z -= proj * ax1.z;

	vec_t ax3 = vec_cross(&ax1, &ax2);

	vec_normalize(&ax2);
	vec_normalize(&ax3);

	rotmat->xx = ax1.x;
	rotmat->yx = ax1.y;
	rotmat->zx = ax1.z;
	rotmat->xy = ax2.x;
	rotmat->yy = ax2.y;
	rotmat->zy = ax2.z;
	rotmat->xz = ax3.x;
	rotmat->yz = ax3.y;
	rotmat->zz = ax3.z;
}
const struct frag *
efp_find_lib(struct efp *efp, const char *name)
{
	/* Case-insensitive linear search of the fragment library; returns
	 * NULL when no fragment with the given name is loaded. */
	size_t idx = 0;

	while (idx < efp->n_lib) {
		if (efp_strcasecmp(efp->lib[idx]->name, name) == 0)
			return efp->lib[idx];
		idx++;
	}
	return NULL;
}
void
efp_add_stress(const vec_t *dr, const vec_t *force, mat_t *stress)
{
	/* Accumulate the outer product dr (x) force into the stress tensor.
	 * Guarded by a critical section because multiple OpenMP threads may
	 * update the same tensor concurrently. */
#ifdef _OPENMP
#pragma omp critical
#endif
	{
		stress->xx += dr->x * force->x;
		stress->xy += dr->x * force->y;
		stress->xz += dr->x * force->z;
		stress->yx += dr->y * force->x;
		stress->yy += dr->y * force->y;
		stress->yz += dr->y * force->z;
		stress->zx += dr->z * force->x;
		stress->zy += dr->z * force->y;
		stress->zz += dr->z * force->z;
	}
}
void
efp_add_force(six_t *grad, const vec_t *com, const vec_t *pt,
    const vec_t *force, const vec_t *add)
{
	/* Add a force applied at point pt to a six-component gradient:
	 * force itself plus the torque (pt - com) x force about the center
	 * of mass.  An optional extra torque `add` may be folded in. */
	vec_t arm = vec_sub(CVEC(pt->x), com);
	vec_t tq = vec_cross(&arm, force);

	if (add) {
		tq.x += add->x;
		tq.y += add->y;
		tq.z += add->z;
	}

	six_atomic_add_xyz(grad, force);
	six_atomic_add_abc(grad, &tq);
}
void
efp_sub_force(six_t *grad, const vec_t *com, const vec_t *pt,
    const vec_t *force, const vec_t *add)
{
	/* Mirror of efp_add_force: subtracts the force and the torque
	 * (pt - com) x force (plus optional `add`) from the gradient. */
	vec_t arm = vec_sub(CVEC(pt->x), com);
	vec_t tq = vec_cross(&arm, force);

	if (add) {
		tq.x += add->x;
		tq.y += add->y;
		tq.z += add->z;
	}

	six_atomic_sub_xyz(grad, force);
	six_atomic_sub_abc(grad, &tq);
}
void
efp_move_pt(const vec_t *com, const mat_t *rotmat,
    const vec_t *pos_int, vec_t *out)
{
	/* Transform a fragment-frame point to lab coordinates:
	 * out = rotmat * pos_int + com. */
	*out = mat_vec(rotmat, pos_int);
	out->x += com->x;
	out->y += com->y;
	out->z += com->z;
}
void
efp_rotate_t2(const mat_t *rotmat, const double *in, double *out)
{
	/* Rotate a rank-2 Cartesian tensor stored row-major in a flat
	 * 3x3 array: out[r2][c2] = sum R[r2][r1] R[c2][c1] in[r1][c1]. */
	for (size_t i = 0; i < 9; i++)
		out[i] = 0.0;

	for (size_t r1 = 0; r1 < 3; r1++) {
		for (size_t c1 = 0; c1 < 3; c1++) {
			for (size_t r2 = 0; r2 < 3; r2++) {
				for (size_t c2 = 0; c2 < 3; c2++) {
					out[r2 * 3 + c2] += in[r1 * 3 + c1] *
					    mat_get(rotmat, r2, r1) *
					    mat_get(rotmat, c2, c1);
				}
			}
		}
	}
}
void
efp_rotate_t3(const mat_t *rotmat, const double *in, double *out)
{
	/* Rotate a rank-3 Cartesian tensor stored row-major in a flat
	 * 3x3x3 array; each index transforms with one factor of rotmat. */
	for (size_t i = 0; i < 27; i++)
		out[i] = 0.0;

	for (size_t r1 = 0; r1 < 3; r1++)
	for (size_t s1 = 0; s1 < 3; s1++)
	for (size_t t1 = 0; t1 < 3; t1++)
	for (size_t r2 = 0; r2 < 3; r2++)
	for (size_t s2 = 0; s2 < 3; s2++)
	for (size_t t2 = 0; t2 < 3; t2++) {
		double fr = mat_get(rotmat, r2, r1);
		double fs = mat_get(rotmat, s2, s1);
		double ft = mat_get(rotmat, t2, t1);

		out[r2 * 9 + s2 * 3 + t2] +=
		    in[r1 * 9 + s1 * 3 + t1] * fr * fs * ft;
	}
}
int
efp_strcasecmp(const char *s1, const char *s2)
{
	/* Case-insensitive string comparison; returns <0, 0, >0 like
	 * strcmp.  Characters are compared through unsigned char: passing
	 * a negative plain char to tolower() is undefined behavior
	 * (C99 7.4), which the previous version invoked for bytes >= 0x80
	 * on platforms where char is signed. */
	const unsigned char *u1 = (const unsigned char *)s1;
	const unsigned char *u2 = (const unsigned char *)s2;

	while (tolower(*u1) == tolower(*u2++))
		if (*u1++ == '\0')
			return 0;
	return tolower(*u1) - tolower(*--u2);
}
int
efp_strncasecmp(const char *s1, const char *s2, size_t n)
{
	/* Case-insensitive comparison of at most n characters; returns
	 * <0, 0, >0 like strncmp.  As in efp_strcasecmp, characters go
	 * through unsigned char before tolower() to avoid undefined
	 * behavior on negative plain-char values (C99 7.4). */
	const unsigned char *u1 = (const unsigned char *)s1;
	const unsigned char *u2 = (const unsigned char *)s2;

	if (n != 0) {
		do {
			if (tolower(*u1) != tolower(*u2++))
				return tolower(*u1) - tolower(*--u2);
			if (*u1++ == '\0')
				break;
		} while (--n != 0);
	}
	return 0;
}
|
OMPDenseMatrix.c | #include <omp.h>
#include <stdlib.h>
#include <stdio.h>
int main (int argc, char* argv[]) {
    /* Dense-matrix PageRank by power iteration.
     * argv[1]: number of pages; the link matrix models a simple chain
     * (each interior page links to its two neighbors).  Runs K = 1000
     * matrix-vector products, timed with OpenMP. */
    float dampingFactor = 0.15;
    if (argc < 2) {
        fprintf(stderr, "usage: %s num_pages\n", argv[0]);
        return 1;
    }
    int numPage = atoi(argv[1]);
    /* The matrix setup below indexes row offsets up to +2, so require
     * at least 3 pages. */
    if (numPage < 3) {
        fprintf(stderr, "num_pages must be at least 3\n");
        return 1;
    }
    int totalSize = numPage * numPage;
    float *sArray, *pageRank, *yArray;
    sArray = (float*)malloc(totalSize*sizeof(float));
    pageRank = (float*)malloc(numPage*sizeof(float));
    yArray = (float*)malloc(numPage*sizeof(float));
    if (sArray == NULL || pageRank == NULL || yArray == NULL) {
        fprintf(stderr, "out of memory\n");
        free(sArray);
        free(pageRank);
        free(yArray);
        return 1;
    }
    int i, j, K, k;
    K = 1000;   /* number of power iterations */
    double startTime, endTime;
    /* Zero the matrix and start from the uniform rank vector. */
    for(i = 0; i < numPage; i++) {
        for (j = 0; j < numPage; j++) {
            sArray[i*numPage + j] = 0.0;
        }
        pageRank[i] = 1/(float)numPage;
    }
    /* Set up the link-probability matrix: first and last rows split
     * rank between their neighbors; interior rows link left and right. */
    sArray[1] = 0.5;
    sArray[numPage - 1] = 0.5;
    for (i = 1; i < numPage - 1; i++) {
        for(j = 0; j < numPage; j++) {
            if (i == 1) {
                sArray[i*numPage] = 1.0;
                sArray[i*numPage+2] = 0.5;
                j = numPage;    /* row done; exit inner loop */
            }
            else {
                if (j == i) {
                    sArray[(i*numPage) + (j - 1)] = 0.5;
                    sArray[(i*numPage) + (j + 1)] = 0.5;
                    j = numPage;    /* row done; exit inner loop */
                }
            }
        }
    }
    sArray[totalSize - 2] = 0.5;
    /* Apply the damping factor (Google matrix construction). */
    for (i = 0; i < numPage; i++) {
        for (j = 0; j < numPage; j++) {
            sArray[i*numPage+j] = ((1-dampingFactor)*sArray[i*numPage+j])+(dampingFactor/numPage);
        }
    }
    /* Timed power iteration: y = S * pageRank, then copy back. */
    startTime = omp_get_wtime();
    for (k = 0; k < K; k++) {
        #pragma omp parallel for private(j)
        for (i = 0; i < numPage; i++) {
            yArray[i] = 0.0;
            for (j = 0; j < numPage; j++) {
                yArray[i] += sArray[i*numPage+j] * pageRank[j];
            }
        }
        /* Serial copy-back.  The original code wrapped this in
         * "#pragma omp master" / "#pragma end master": "end master" is
         * not an OpenMP directive at all, and "master" has no effect
         * outside a parallel region, so both are removed. */
        for (i = 0; i < numPage; i++) {
            pageRank[i] = yArray[i];
        }
    }
    endTime = omp_get_wtime();
    /* Print all ranks for small problems, otherwise just the extremes. */
    if (numPage < 20) {
        for (i = 0; i < numPage; i++) {
            printf("%f \n", pageRank[i]);
        }
    }
    else {
        float max, min;
        max = pageRank[0];
        min = pageRank[0];
        for (i = 0; i < numPage; i++) {
            if (max < pageRank[i])
                max = pageRank[i];
            if (min > pageRank[i])
                min = pageRank[i];
        }
        printf("Min Pagerank = %f \n", min);
        printf("Max Pagerank = %f \n", max);
    }
    printf("RUNTIME = %.16f\n", endTime-startTime);
    /* Release the heap buffers (previously leaked). */
    free(sArray);
    free(pageRank);
    free(yArray);
    return 0;
}
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-common.h"
#include "toplev.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* Language-specific declaration information. */
struct lang_decl GTY(())
{
  /* The C front end needs no extra per-declaration data; the dummy
     member only keeps the struct non-empty.  */
  char dummy;
};
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* C-front-end-specific data hung off RECORD/UNION/ENUMERAL_TYPE nodes.  */
struct lang_type GTY(())
{
  /* In a RECORD_TYPE, a sorted array of the fields of the type.  */
  struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields"))) s;
  /* In an ENUMERAL_TYPE, the min and max values.  */
  tree enum_min;
  tree enum_max;
  /* In a RECORD_TYPE, information specific to Objective-C, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface.  See objc/objc-act.h for details.  */
  tree objc_info;
};
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a label was defined in a statement expression which
has finished and so can no longer be jumped to. */
#define C_DECL_UNJUMPABLE_STMT_EXPR(EXP) \
DECL_LANG_FLAG_6 (LABEL_DECL_CHECK (EXP))
/* Record whether a label was the subject of a goto from outside the
current level of statement expression nesting and so cannot be
defined right now. */
#define C_DECL_UNDEFINABLE_STMT_EXPR(EXP) \
DECL_LANG_FLAG_7 (LABEL_DECL_CHECK (EXP))
/* Record whether a label was defined in the scope of an identifier
with variably modified type which has finished and so can no longer
be jumped to. */
#define C_DECL_UNJUMPABLE_VM(EXP) \
DECL_LANG_FLAG_3 (LABEL_DECL_CHECK (EXP))
/* Record whether a label was the subject of a goto from outside the
current level of scopes of identifiers with variably modified type
and so cannot be defined right now. */
#define C_DECL_UNDEFINABLE_VM(EXP) \
DECL_LANG_FLAG_5 (LABEL_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (TYPE_ARG_TYPES (TREE_TYPE (EXP)) == 0 \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
  /* The value of the expression.  */
  tree value;
  /* Record the original binary operator of an expression, which may
     have been changed by fold, STRING_CST for unparenthesized string
     constants, or ERROR_MARK for other expressions (including
     parenthesized expressions).  Presumably consumed by parser
     diagnostics -- confirm in c-typeck.c.  */
  enum tree_code original_code;
};
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
/* NOTE: per the comment above, this is currently only used to tell
   tag definitions, tag references and typeof uses apart.  */
enum c_typespec_kind {
  /* A reserved keyword type specifier.  */
  ctsk_resword,
  /* A reference to a tag, previously declared, such as "struct foo".
     This includes where the previous declaration was as a different
     kind of tag, in which case this is only valid if shadowing that
     tag in an inner scope.  */
  ctsk_tagref,
  /* A reference to a tag, not previously declared in a visible
     scope.  */
  ctsk_tagfirstref,
  /* A definition of a tag such as "struct foo { int a; }".  */
  ctsk_tagdef,
  /* A typedef name.  */
  ctsk_typedef,
  /* An ObjC-specific kind of type specifier.  */
  ctsk_objc,
  /* A typeof specifier.  */
  ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
  /* What kind of type specifier this is (tag ref/def, typedef, ...).  */
  enum c_typespec_kind kind;
  /* The specifier itself: the type or tag tree node.  */
  tree spec;
};
/* A storage class specifier. */
enum c_storage_class {
  /* No storage class specifier was given.  */
  csc_none,
  csc_auto,
  csc_extern,
  csc_register,
  csc_static,
  csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", or none of these. */
enum c_typespec_keyword {
  cts_none,
  cts_void,
  cts_bool,         /* _Bool */
  cts_char,
  cts_int,
  cts_float,
  cts_double,
  /* Decimal floating types (_Decimal32 / _Decimal64 / _Decimal128).  */
  cts_dfloat32,
  cts_dfloat64,
  cts_dfloat128
};
/* A sequence of declaration specifiers in C. */
struct c_declspecs {
  /* The type specified, if a single type specifier such as a struct,
     union or enum specifier, typedef name or typeof specifies the
     whole type, or NULL_TREE if none or a keyword such as "void" or
     "char" is used.  Does not include qualifiers.  */
  tree type;
  /* The attributes from a typedef decl.  */
  tree decl_attr;
  /* When parsing, the attributes.  Outside the parser, this will be
     NULL; attributes (possibly from multiple lists) will be passed
     separately.  */
  tree attrs;
  /* Any type specifier keyword used such as "int", not reflecting
     modifiers such as "short", or cts_none if none.  */
  enum c_typespec_keyword typespec_word;
  /* The storage class specifier, or csc_none if none.  */
  enum c_storage_class storage_class;
  /* Whether any declaration specifiers have been seen at all.  */
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether a type specifier has been seen.  */
  BOOL_BITFIELD type_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether a struct, union or enum type either had its content
     defined by a type specifier in the list or was the first visible
     declaration of its tag.  */
  BOOL_BITFIELD tag_defined_p : 1;
  /* Whether the type is explicitly "signed" or specified by a typedef
     whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.  Width of 1 added for consistency with every other
     flag here, so the flags pack into a single unit instead of this
     one occupying a full BOOL_BITFIELD on its own.  */
  BOOL_BITFIELD default_int_p : 1;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "__thread" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
};
/* The various kinds of declarators in C. */
/* Discriminator for the union in struct c_declarator below.  */
enum c_declarator_kind {
  /* An identifier.  */
  cdk_id,
  /* A function.  */
  cdk_function,
  /* An array.  */
  cdk_array,
  /* A pointer.  */
  cdk_pointer,
  /* Parenthesized declarator with nested attributes.  */
  cdk_attrs
};
/* Information about the parameters in a function declarator. */
struct c_arg_info {
  /* A list of parameter decls.  */
  tree parms;
  /* A list of structure, union and enum tags defined.  */
  tree tags;
  /* A list of argument types to go in the FUNCTION_TYPE.  */
  tree types;
  /* A list of non-parameter decls (notably enumeration constants)
     defined with the parameters.  */
  tree others;
  /* A list of VLA sizes from the parameters.  In a function
     definition, these are used to ensure that side-effects in sizes
     of arrays converted to pointers (such as a parameter int i[n++])
     take place; otherwise, they are ignored.  */
  tree pending_sizes;
  /* True when these arguments had [*] (unspecified VLA bound).  */
  BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
  /* The kind of declarator; selects the active member of the union.  */
  enum c_declarator_kind kind;
  /* Except for cdk_id, the contained declarator.  For cdk_id, NULL.  */
  struct c_declarator *declarator;
  location_t id_loc; /* Currently only set for cdk_id. */
  union {
    /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
       declarator.  */
    tree id;
    /* For functions.  */
    struct c_arg_info *arg_info;
    /* For arrays.  */
    struct {
      /* The array dimension, or NULL for [] and [*].  */
      tree dimen;
      /* The qualifiers inside [].  */
      int quals;
      /* The attributes (currently ignored) inside [].  */
      tree attrs;
      /* Whether [static] was used.  */
      BOOL_BITFIELD static_p : 1;
      /* Whether [*] was used.  */
      BOOL_BITFIELD vla_unspec_p : 1;
    } array;
    /* For pointers, the qualifiers on the pointer type.  */
    int pointer_quals;
    /* For attributes.  */
    tree attrs;
  } u;
};
/* A type name. */
/* A type name (as in a cast or sizeof): specifiers plus an abstract
   declarator.  */
struct c_type_name {
  /* The declaration specifiers.  */
  struct c_declspecs *specs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
  /* The declaration specifiers, minus any prefix attributes.  */
  struct c_declspecs *specs;
  /* The prefix attributes, kept separate from specs.  */
  tree attrs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* Save and restore the variables in this file and elsewhere
that keep track of the progress of compilation of the current function.
Used for nested functions. */
struct language_function GTY(())
{
  /* Common C-family per-function state.  */
  struct c_language_function base;
  /* Saved break/continue jump targets (see c_break_label and
     c_cont_label declared below).  */
  tree x_break_label;
  tree x_cont_label;
  /* Innermost enclosing switch statement; not traversed by GC.  */
  struct c_switch * GTY((skip)) x_switch_stack;
  /* Parameter info of the function being compiled; not traversed by GC.  */
  struct c_arg_info * GTY((skip)) arg_info;
  /* Saved copies of the current_function_returns_* and related flags
     declared later in this header.  */
  int returns_value;
  int returns_null;
  int returns_abnormally;
  int warn_about_return_type;
  int extern_inline;
};
/* Save lists of labels used or defined in particular contexts.
Allocated on the parser obstack. */
/* Singly-linked list node for labels; allocated on the parser obstack
   (see the comment above).  */
struct c_label_list
{
  /* The label at the head of the list.  */
  tree label;
  /* The rest of the list.  */
  struct c_label_list *next;
};
/* Statement expression context. */
/* One level of statement-expression nesting: tracks which labels were
   defined or used inside it.  */
struct c_label_context_se
{
  /* The labels defined at this level of nesting.  */
  struct c_label_list *labels_def;
  /* The labels used at this level of nesting.  */
  struct c_label_list *labels_used;
  /* The next outermost context.  */
  struct c_label_context_se *next;
};
/* Context of variably modified declarations. */
/* One level of scope containing variably modified declarations: tracks
   labels defined or used inside it.  */
struct c_label_context_vm
{
  /* The labels defined at this level of nesting.  */
  struct c_label_list *labels_def;
  /* The labels used at this level of nesting.  */
  struct c_label_list *labels_used;
  /* The scope of this context.  Multiple contexts may be at the same
     numbered scope, since each variably modified declaration starts a
     new context.  */
  unsigned scope;
  /* The next outermost context.  */
  struct c_label_context_vm *next;
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern int global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void insert_block (tree);
extern void c_expand_body (tree);
extern void c_init_decl_processing (void);
extern void c_dup_lang_specific_decl (tree);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (tree, struct c_declspecs *,
bool, bool);
extern tree build_enumerator (tree, tree);
extern tree check_for_loop_decls (void);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (tree, location_t);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern void c_maybe_initialize_eh (void);
extern void finish_decl (tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (tree, tree, tree);
extern struct c_arg_info *get_parm_info (bool);
extern tree grokfield (struct c_declarator *, struct c_declspecs *, tree);
extern tree groktypename (struct c_type_name *);
extern tree grokparm (const struct c_parm *);
extern tree implicitly_declare (tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (struct function *);
extern void c_pop_function_context (struct function *);
extern void push_parm_decl (const struct c_parm *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *,
bool);
extern tree builtin_function (const char *, tree, int, enum built_in_class,
const char *, tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (enum tree_code, tree);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (enum tree_code, tree);
extern int c_expand_decl (tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern int c_disregard_inline_limits (tree);
extern int c_cannot_inline_tree_fn (tree *);
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern tree c_objc_common_truthvalue_conversion (tree expr);
extern bool c_warn_unused_global_decl (tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
#define c_build_type_variant(TYPE, CONST_P, VOLATILE_P) \
c_build_qualified_type ((TYPE), \
((CONST_P) ? TYPE_QUAL_CONST : 0) | \
((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0))
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern struct c_switch *c_switch_stack;
extern struct c_label_context_se *label_context_stack_se;
extern struct c_label_context_vm *label_context_stack_vm;
extern tree require_complete_type (tree);
extern int same_translation_unit_p (tree, tree);
extern int comptypes (tree, tree);
extern bool c_vla_type_p (tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (tree, tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (struct c_expr);
extern tree composite_type (tree, tree);
extern tree build_component_ref (tree, tree);
extern tree build_array_ref (tree, tree);
extern tree build_external_ref (tree, int, location_t);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (struct c_expr);
extern struct c_expr c_expr_sizeof_type (struct c_type_name *);
extern struct c_expr parser_build_unary_op (enum tree_code, struct c_expr);
extern struct c_expr parser_build_binary_op (enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (tree, tree, tree);
extern tree build_compound_expr (tree, tree);
extern tree c_cast_expr (struct c_type_name *, tree);
extern tree build_c_cast (tree, tree);
extern void store_init_value (tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void push_init_level (int);
extern struct c_expr pop_init_level (int);
extern void set_init_index (tree, tree);
extern void set_init_label (tree);
extern void process_init_element (struct c_expr);
extern tree build_compound_literal (tree, tree);
extern tree c_start_case (tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern tree c_convert_parm_for_inlining (tree, tree, tree, int);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (tree);
extern tree c_process_expr_stmt (tree);
extern tree c_finish_expr_stmt (tree);
extern tree c_finish_return (tree);
extern tree c_finish_bc_stmt (tree *, bool);
extern tree c_finish_goto_label (tree);
extern tree c_finish_goto_ptr (tree);
extern void c_begin_vm_scope (unsigned int);
extern void c_end_vm_scope (unsigned int);
extern tree c_expr_to_decl (tree, bool *, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (tree, tree);
extern tree c_finish_omp_clauses (tree);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* Nonzero means we are reading code that came from a system header file. */
extern int system_header_p;
/* True means global_bindings_p should return false even if the scope stack
says we are in file scope. */
extern bool c_override_global_bindings_to_false;
/* True means we've initialized exception handling. */
extern bool c_eh_initialized_p;
/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
/* In order for the format checking to accept the C frontend
diagnostic framework extensions, you must include this file before
toplev.h, not after. */
#if GCC_VERSION >= 4001
#define ATTRIBUTE_GCC_CDIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m ,n))) ATTRIBUTE_NONNULL(m)
#else
#define ATTRIBUTE_GCC_CDIAG(m, n) ATTRIBUTE_NONNULL(m)
#endif
extern void pedwarn_c90 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2);
extern void pedwarn_c99 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2);
#endif /* ! GCC_C_TREE_H */
|
omp_alloc_hbw.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
// Creates a high-bandwidth-memory allocator with a 2 MB pool and a NULL
// fallback, lets two threads each request 1 MB from it, and checks that the
// pool-overhead / fallback behavior matches expectations.
int main() {
  omp_alloctrait_t traits[2];
  omp_allocator_handle_t allocator;
  void *ptrs[2];

  traits[0].key = omp_atk_pool_size;
  traits[0].value = 2 * 1024 * 1024;
  traits[1].key = omp_atk_fallback;
  traits[1].value = omp_atv_null_fb;

  allocator = omp_init_allocator(omp_high_bw_mem_space, 2, traits);
  printf("allocator hbw created: %p\n", allocator);

#pragma omp parallel num_threads(2)
  {
    int tid = omp_get_thread_num();
    ptrs[tid] = omp_alloc(1024 * 1024, allocator);
#pragma omp barrier
    printf("th %d, ptr %p\n", tid, ptrs[tid]);
    omp_free(ptrs[tid], allocator);
  }

  int ok;
  if (allocator != omp_null_allocator) {
    // As an allocator has some small memory overhead
    // exactly one of the two pointers should be NULL
    // because of NULL fallback requested
    ok = (ptrs[0] == NULL) != (ptrs[1] == NULL);
  } else {
    // NULL allocator should cause default allocations
    ok = (ptrs[0] != NULL) && (ptrs[1] != NULL);
  }
  if (ok) {
    printf("passed\n");
    return 0;
  }
  printf("failed: pointers %p %p\n", ptrs[0], ptrs[1]);
  return 1;
}
|
2.c | #include<stdio.h>
#include<omp.h>
int DATA = 30;
// Demonstrates two reader threads and one writer thread serialized by an
// unnamed critical section protecting the shared variable DATA.
int main(){
  omp_set_dynamic(0);
#pragma omp parallel num_threads(3)
  {
    // BUGFIX: `id` was declared outside the parallel region and therefore
    // shared, so concurrent writes of omp_get_thread_num() raced and threads
    // could observe each other's id. Declaring it inside makes it private.
    int id = omp_get_thread_num();
    // Reader thread 1
    if(id == 0){
#pragma omp critical
      {
        printf(" Reader thread 1. DATA = %d", DATA);
        fgetc(stdin);
      }
    }
    // Reader thread 2
    else if(id == 1){
#pragma omp critical
      {
        printf(" Reader thread 2. DATA = %d", DATA);
        fgetc(stdin);
      }
    }
    // Writer thread
    else{
#pragma omp critical
      {
        DATA *= 2;
        printf(" Writer thread. Doubling the value, DATA = %d", DATA);
        fgetc(stdin);
      }
    }
  }
  return 0;
}
|
ordering_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file ordering_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
namespace mshadow {
/*!
 * \brief Reinterpret a contiguous tensor as one with a different shape,
 *        without copying the underlying data.
 *
 * Note: the element count of \p target_shape is not validated here — callers
 * must ensure it matches the source tensor's size.
 */
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
                                                   Shape<dst_dim> target_shape) {
  // An in-place reshape is only meaningful for contiguous storage.
  CHECK_EQ(src.CheckContiguous(), true);
  return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
}  // namespace mshadow  (FIX: removed stray ';' after the closing brace)
namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
// What the TopK operator emits: the top-k values, their indices, a 0/1 mask
// over the full input, or both values and indices.
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
}  // topk_enum
// Operator parameters for TopK. Defaults: axis=-1 (last axis), k=1,
// ret_typ=indices, is_ascend=false (pick the k largest).
struct TopKParam : public dmlc::Parameter<TopKParam> {
  dmlc::optional<int> axis;  // axis to select along; unset => flattened input
  int k;                     // number of elements to keep; k < 1 => full sort
  int ret_typ;               // one of topk_enum::TopKReturnType
  bool is_ascend;            // true => k smallest, false => k largest
  DMLC_DECLARE_PARAMETER(TopKParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
    .describe("Axis along which to choose the top k indices."
              " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(k).set_default(1)
    .describe("Number of top elements to select,"
              " should be always smaller than or equal to the element number in the given axis."
              " A global sort is performed if set k < 1.");
    DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
    .add_enum("value", topk_enum::kReturnValue)
    .add_enum("indices", topk_enum::kReturnIndices)
    .add_enum("mask", topk_enum::kReturnMask)
    .add_enum("both", topk_enum::kReturnBoth)
    .describe("The return type.\n"
        " \"value\" means to return the top k values,"
        " \"indices\" means to return the indices of the top k values,"
        " \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
        " \"both\" means to return a list of both values and indices of top k elements.")
    ;
    DMLC_DECLARE_FIELD(is_ascend).set_default(false)
      .describe("Whether to choose k largest or k smallest elements."
                " Top K largest elements will be chosen if set to false.");
  }
};
// Operator parameters for Sort. Defaults: axis=-1 (last axis), ascending.
struct SortParam : public dmlc::Parameter<SortParam> {
  dmlc::optional<int> axis;  // axis to sort along; unset => flattened input
  bool is_ascend;            // sort direction
  DMLC_DECLARE_PARAMETER(SortParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
    // FIX: description read "Axis along which to choose sort the input
    // tensor." — garbled wording, now consistent with ArgSortParam.
    .describe("Axis along which to sort the input tensor."
              " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(true)
      .describe("Whether to sort in ascending or descending order.");
  }
};
// Operator parameters for ArgSort. Defaults: axis=-1 (last axis), ascending.
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
  dmlc::optional<int> axis;  // axis to sort along; unset => flattened input
  bool is_ascend;            // sort direction
  DMLC_DECLARE_PARAMETER(ArgSortParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
    .describe("Axis along which to sort the input tensor."
              " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(true)
      .describe("Whether to sort in ascending or descending order.");
  }
};
/*!
 * \brief Normalize a TopKParam against a concrete input shape.
 *
 * Resolves the (possibly negative / absent) axis, derives the batch size and
 * per-batch element count, clamps k (k <= 0 selects every element, i.e. a
 * full sort), decides whether a transpose is needed (axis is not the last
 * dimension), and computes the output shape.
 *
 * \param src_shape shape of the input tensor
 * \param param the raw operator parameters
 * \param target_shape [out] shape of the operator output
 * \param batch_size [out] number of independent segments to process
 * \param element_num [out] number of elements in each segment
 * \param axis [out] resolved non-negative axis
 * \param k [out] effective k (always in [1, element_num])
 * \param do_transpose [out] true when axis is not the innermost dimension
 * \param is_ascend [out] copied from param
 */
inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape,
                           int *batch_size, int *element_num, int *axis, int *k,
                           bool *do_transpose, bool *is_ascend) {
  *do_transpose = false;
  *k = param.k;
  *is_ascend = param.is_ascend;
  // get batch_size, axis and element_num
  if (!static_cast<bool>(param.axis)) {  // No axis given: operate on the flattened input
    *axis = 0;
    *batch_size = 1;
    *element_num = src_shape.Size();
  } else {
    *axis = param.axis.value();
    if (*axis < 0) {
      *axis += src_shape.ndim();  // resolve negative axis
    }
    CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
      << "Invalid axis! axis should be between 0 and "
      << src_shape.ndim() << ", found axis=" << *axis;
    *batch_size = src_shape.Size() / src_shape[*axis];
    *element_num = src_shape[*axis];
    if (*axis != static_cast<int>(src_shape.ndim()) - 1) {
      *do_transpose = true;
    }
  }
  // get k: non-positive k means "keep everything" (full sort)
  if (param.k <= 0) {
    *k = *element_num;
  }
  // get target_shape
  if (!static_cast<bool>(param.axis)) {
    if (param.ret_typ != topk_enum::kReturnMask) {
      *target_shape = mshadow::Shape1(*k);
    } else {
      *target_shape = src_shape;  // the mask covers the whole input
    }
  } else {
    *target_shape = src_shape;
    if (param.ret_typ != topk_enum::kReturnMask) {
      (*target_shape)[*axis] = *k;
    }
  }
  // FIX: the message claimed "k must be smaller than element_num" although
  // the check accepts k == element_num; state the actual valid range.
  CHECK(*k >= 1 && *k <= *element_num) << "k must be between 1 and "
                                       << *element_num << ", get k = " << *k;
}
using namespace mshadow;
template<typename xpu>
void TopKSort(const Tensor<xpu, 1, real_t>& dat,
const Tensor<xpu, 1, int>& ind,
const Tensor<xpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<xpu> *s);
// CPU batched top-K: for every length-N segment, orders the first K entries
// of `ind` by the values they point at and gathers those values into `dat`.
// Segments are processed in parallel with OpenMP.
template<>
MSHADOW_FORCE_INLINE void TopKSort<cpu>(const Tensor<cpu, 1, real_t>& dat,
                                        const Tensor<cpu, 1, int>& ind,
                                        const Tensor<cpu, 1, char>& work,
                                        int K, int N, bool is_ascend,
                                        Stream<cpu> *s) {
  // Use full sort when K is relatively large.
  const bool full_sort(K*8 > N);
  // Batch size.
  const int M(work.size(0)/(sizeof(real_t)*N));
  const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < M; ++i) {
    // Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
    // NOTE: `vals` points at the start of `work` (not offset by i*N) on
    // purpose: the entries of `ind` are global indices into the whole
    // flattened array (filled by range_fwd in TopKImpl), so vals[index]
    // is valid for every batch.
    real_t *vals = reinterpret_cast<real_t*>(work.dptr_);
    real_t *sorted_vals = dat.dptr_+i*N;
    int *indices = ind.dptr_+i*N;
    if (is_ascend) {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
      }
    } else {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
      }
    }
    // Gather the values selected by the first K (now ordered) indices.
    for (int j = 0; j < K; ++j) {
      sorted_vals[j] = vals[indices[j]];
    }
  }
}
#ifdef __CUDACC__
// Returns true when (val1, ind1) ranks before (val2, ind2) in the requested
// order (ascending: smaller first; descending: larger first).
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1, DType val2, int ind2, bool is_ascend) {
  // Negative indices denote undefined values which are considered arbitrary small resp. large.
  return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2)));
}
/*!
 * \brief In-place merge of two sorted top-K lists into val1/ind1.
 *
 * First determines the intervals [0,..,i1], [0,..,i2] of the two lists that
 * will be part of the merged list, then merges from back to front so val1
 * can be overwritten safely.
 */
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1, DType *val2, int *ind2,
                               bool is_ascend) {
  int i1(K-1), i2(K-1);
  // Each loop iteration discards one element that cannot make the merged
  // top-K; after K steps, i1+i2 == K-2 and exactly K survivors remain.
  for (int i = 0; i < K; ++i) {
    if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
      --i2;
    } else {
      --i1;
    }
  }
  // Now merge the lists from back to front.
  for (int i = K; i--;) {
    // FIX: parenthesized the `&&` inside the `||` — same evaluation as before
    // (&& binds tighter), but explicit grouping silences -Wparentheses and
    // documents the intent.
    if (i2 < 0 || (i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend))) {
      val1[i] = val1[i1];
      ind1[i] = ind1[i1];
      --i1;
    } else {
      val1[i] = val2[i2];
      ind1[i] = ind2[i2];
      --i2;
    }
  }
}
// CUDA kernel for batched top-K with very small K. One thread block handles
// one batch item: each thread builds a private sorted top-K list over a
// strided slice in shared memory, then the per-thread lists are merged
// pairwise until thread 0 holds the final result.
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
  // Buffer for pairwise reduction.
  extern __shared__ int buff[];
  // Start of buffer sections associated with this thread.
  const int offset(threadIdx.x*K);
  int *ind_buff = &buff[offset];
  // Values live after all blockDim.x*K index slots in the same shared buffer.
  DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
  // Initialize top-K values for this thread.
  for (int i = 0; i < K; ++i) {
    ind_buff[i] = -1;  // -1 marks "undefined" for TopKCompare
  }
  // Range of values this thread cares about. Each thread block processes
  // a different batch item (i.e. a different set of ind/val where we
  // have to select the top-K elements). All threads within the same
  // block work on the same batch item.
  const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
  // Select top-K from this range and store it sorted in the buffer.
  // We assume a small K, so linear insertion is o.k.
  for (int i = first; i < last; i += blockDim.x) {
    DType cur_val(val[i]);
    int cur_ind(ind[i]);
    // Shift weaker entries down while the current element outranks them.
    for (int j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) {
      if (j+1 < K) {
        val_buff[j+1] = val_buff[j];
        ind_buff[j+1] = ind_buff[j];
      }
      val_buff[j] = cur_val;
      ind_buff[j] = cur_ind;
    }
  }
  // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
  // necessary a power of two, therefore the additional checks for last_s.
  for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x;
       last_s > 1; last_s = s, s = (s+1)/2) {
    __syncthreads();
    if (threadIdx.x < s && threadIdx.x+s < last_s) {
      MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
    }
  }
  // Final updates on master thread.
  if (threadIdx.x == 0) {
    for (int i = 0; i < K; ++i) {
      ind[blockIdx.x*N+i] = ind_buff[i];
      val[blockIdx.x*N+i] = val_buff[i];
    }
  }
}
// GPU batched top-K. Large K: three stable SortByKey passes (values, then
// twice by batch id to restore per-batch grouping). Small K (<=5): a single
// shared-memory kernel launch per batch item.
template<>
MSHADOW_FORCE_INLINE void TopKSort<gpu>(const Tensor<gpu, 1, real_t>& dat,
                                        const Tensor<gpu, 1, int>& ind,
                                        const Tensor<gpu, 1, char>& work,
                                        int K, int N, bool is_ascend,
                                        Stream<gpu> *s) {
  // Use full sort for all but very small K for which we
  // can do a partial sort entirely within shared memory.
  const bool full_sort(K > 5);
  // Batch size.
  const int M(dat.size(0)/N);
  if (full_sort) {
    // Divide workspace into two parts. The first one is needed to store batch ids.
    const int id_size(sizeof(int)*ind.size(0));
    Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s);
    Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
    mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
    if (M > 1) {
      // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
      // batch_id is recomputed after each pass because the previous sort
      // permuted the array it was derived from.
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
    }
  } else {
    const int nthreads(mshadow::cuda::kBaseThreadNum);
    // Shared memory holds K indices plus K values per thread.
    PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(int)+sizeof(real_t)),
                        mshadow::Stream<gpu>::GetStream(s)>>>
                       (K, N, dat.dptr_, ind.dptr_, is_ascend);
  }
}
#endif
/*!
* \brief Implementation of the TopK operation
*
*
* \param ctx the running context
* \param resource temporary resource handler
* \param src the Source blob
* \param ret the destination blobs
* \param k the K elements to keep
* \param param the topk parameters
* \tparam xpu the device type.
*/
// Shared implementation behind TopK, Sort and ArgSort. Carves one workspace
// allocation into sorted values / indices / (optional) mask buffers, runs a
// batched (partial) sort, then scatters or slices results into `ret`
// according to param.ret_typ.
template<typename xpu>
void TopKImpl(RunContext ctx,
              Resource resource,
              const TBlob& src,
              const std::vector<TBlob>& ret,
              const TopKParam& param) {
  using namespace mshadow;
  using namespace mshadow::expr;
  for (auto ret_ele : ret) {
    CHECK_EQ(ret_ele.type_flag_, src.type_flag_);
  }
  // 1. Parse and initialize information
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, real_t> sorted_dat;
  Tensor<xpu, 1, int> indices, sel_indices;
  Tensor<xpu, 2, real_t> mask_val;
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  TShape target_shape;
  ParseTopKParam(src.shape_, param,
    &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  Tensor<xpu, 3, real_t> dat = src.FlatTo3D<xpu, real_t>(axis, axis, s);
  size_t temp_size = 0;
  // Temp space needed by the gpu-based full sorts.
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size()));
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, real_t, xpu>(src.Size()));
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<real_t, int, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += sizeof(int) * src.Size();
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, sizeof(real_t) * src.Size());
  size_t workspace_size = temp_size + sizeof(real_t) * src.Size() + sizeof(int) * src.Size();
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += sizeof(int) * batch_size * k + sizeof(real_t) * batch_size * k;
  }
  workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
  // Carve the single workspace allocation into the buffers below; the
  // running pointer tracks the next free byte.
  char* workspace_curr_ptr = workspace.dptr_;
  sorted_dat = Tensor<xpu, 1, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
                                      Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += sizeof(real_t) * src.Size();
  indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += sizeof(int) * src.Size();
  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                      Shape1(batch_size * k), s);
    workspace_curr_ptr += sizeof(int) * batch_size * k;
    mask_val = Tensor<xpu, 2, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
                                      Shape2(batch_size * k, 1), s);
    workspace_curr_ptr += sizeof(real_t) * batch_size * k;
    mask_val = scalar<real_t>(1);  // the value scattered into the mask output
    CHECK_EQ(sel_indices.CheckContiguous(), true);
    CHECK_EQ(mask_val.CheckContiguous(), true);
  }
  if (std::is_same<xpu, cpu>::value) {
    // On CPU the (possibly transposed) flattened source is kept in
    // temp_workspace and the sort reads values through it.
    Tensor<xpu, 1, real_t> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
                                              Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(real_t) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, real_t>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(real_t)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    // On GPU the data is copied into sorted_dat and sorted in place.
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }
  // Fill `indices` with 0, 1, 2, ... over the whole flattened input; these
  // global offsets are what the sort permutes.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1,
                                           kWriteTo, indices.dptr_);
  CHECK_EQ(indices.CheckContiguous(), true);
  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 2, real_t> ret_mask =
      ret[0].get_with_shape<xpu, 2, real_t>(Shape2(ret[0].Size(), 1), s);
    ret_mask = scalar<real_t>(0);
    sel_indices = reshape(slice<1>(
                            inplace_reshape(indices,
                                            Shape2(batch_size,
                                                   element_num)), 0, k),
                          Shape1(batch_size * k));
    if (do_transpose) {
      TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    IndexFill(ret_mask, sel_indices, mask_val);
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    // The sorted indices are global offsets; mod by element_num converts
    // them back to positions within each batch segment.
    if (do_transpose) {
      Tensor<xpu, 3, real_t> ret_indices = ret[0].FlatTo3D<xpu, real_t>(axis, axis, s);
      ret_indices = tcast<real_t>(transpose(
                                    slice<2>(inplace_reshape(indices,
                                                             Shape3(ret_indices.shape_[0],
                                                                    ret_indices.shape_[2],
                                                                    element_num)),
                                             0, k),
                                    Shape3(0, 2, 1)));
      ret_indices = F<mshadow_op::mod>(ret_indices, element_num);
    } else {
      Tensor<xpu, 2, real_t> ret_indices =
        ret[0].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
      ret_indices = tcast<real_t>(slice<1>(
                                    inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k));
      ret_indices = F<mshadow_op::mod>(ret_indices, element_num);
    }
  } else {
    // kReturnValue or kReturnBoth: ret[0] gets values; ret[1] (both only)
    // gets the per-batch indices.
    if (do_transpose) {
      Tensor<xpu, 3, real_t> ret_value = ret[0].FlatTo3D<xpu, real_t>(axis, axis, s);
      Tensor<xpu, 3, real_t> ret_indices = ret[1].FlatTo3D<xpu, real_t>(axis, axis, s);
      ret_value = transpose(
                    slice<2>(inplace_reshape(sorted_dat,
                                             Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
                             0, k),
                    Shape3(0, 2, 1));
      ret_indices = tcast<real_t>(transpose(
                                    slice<2>(inplace_reshape(indices,
                                                             Shape3(ret_indices.shape_[0],
                                                                    ret_indices.shape_[2],
                                                                    element_num)),
                                             0, k),
                                    Shape3(0, 2, 1)));
      ret_indices = F<mshadow_op::mod>(ret_indices, element_num);
    } else {
      Tensor<xpu, 2, real_t> ret_value =
        ret[0].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, real_t> ret_indices =
        ret[1].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
      ret_value = slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k);
      ret_indices = tcast<real_t>(slice<1>(
                                    inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k));
      ret_indices = F<mshadow_op::mod>(ret_indices, element_num);
    }
  }
}
/*!
 * \brief NNVM compute entry point for the TopK operator; validates the write
 *        request and forwards to the shared TopKImpl.
 */
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  // TODO(sxjscience) We can support inplace in the future
  CHECK_EQ(req[0], kWriteTo) << "TopK does not support inplace";
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, param);
}
/*!
 * \brief NNVM compute entry point for Sort, expressed as a full top-k
 *        (k == 0 selects every element) returning only the sorted values.
 */
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  CHECK_EQ(req[0], kWriteTo) << "Sort does not support inplace";
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam forwarded;
  forwarded.axis = param.axis;
  forwarded.k = 0;
  forwarded.ret_typ = topk_enum::kReturnValue;
  forwarded.is_ascend = param.is_ascend;
  TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, forwarded);
}
/*!
 * \brief NNVM compute entry point for ArgSort, expressed as a full top-k
 *        (k == 0 selects every element) returning only the indices.
 */
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  CHECK_EQ(req[0], kWriteTo) << "ArgSort does not support inplace";
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam forwarded;
  forwarded.axis = param.axis;
  forwarded.k = 0;
  forwarded.ret_typ = topk_enum::kReturnIndices;
  forwarded.is_ascend = param.is_ascend;
  TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, forwarded);
}
// Backward pass for TopK (value / both modes only): scatters the output
// gradient back to the positions the forward pass selected, using the saved
// indices (inputs[2]) plus per-batch offsets to form global positions.
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  CHECK_NE(req[0], kWriteInplace);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  TShape target_shape;
  ParseTopKParam(outputs[0].shape_, param,
    &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  // Workspace layout: [sel_indices (batch*k) | batch_shift (batch) | dummy_index (batch*k)]
  Tensor<xpu, 1, real_t> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, real_t>(Shape1(batch_size * k * 2 + batch_size), s);
  Tensor<xpu, 1, real_t> sel_indices =
    Tensor<xpu, 1, real_t>(workspace.dptr_, Shape1(batch_size * k), s);
  Tensor<xpu, 1, real_t> batch_shift =
    Tensor<xpu, 1, real_t>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
  Tensor<xpu, 1, real_t> dummy_index =
    Tensor<xpu, 1, real_t>(workspace.dptr_ + batch_size * k + batch_size,
                           Shape1(batch_size * k), s);
  Tensor<xpu, 2, real_t> out_grad =
    inputs[0].get_with_shape<xpu, 2, real_t>(Shape2(inputs[0].shape_.Size(), 1), s);
  Tensor<xpu, 2, real_t> in_grad =
    outputs[0].get_with_shape<xpu, 2, real_t>(Shape2(outputs[0].shape_.Size(), 1), s);
  // batch_shift[b] = b * element_num: start offset of each batch segment.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0.0f,
    static_cast<real_t>(element_num), kWriteTo, batch_shift.dptr_);
  if (do_transpose) {
    Tensor<xpu, 1, real_t> indices = inputs[2].FlatTo1D<xpu, real_t>(s);
    TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
    sel_indices = reshape(transpose(
                            broadcast_to(inplace_reshape(batch_shift,
                                                         Shape3(src_shape[0], src_shape[2], 1)),
                                         TShape(Shape3(src_shape[0], src_shape[2], k))),
                            Shape3(0, 2, 1)),
                          Shape1(batch_size * k));
    sel_indices += indices;
    // Map positions from the transposed layout back to the original layout.
    sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                    Shape3(0, 2, 1));
  } else {
    Tensor<xpu, 2, real_t> indices =
      inputs[2].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
    sel_indices = reshape(indices +
                          broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
                                       TShape(Shape2(batch_size, k))),
                          Shape1(batch_size * k));
  }
  CHECK_EQ(sel_indices.CheckContiguous(), true);
  if (kWriteTo == req[0]) {
    in_grad = scalar<real_t>(0);
    IndexFill(in_grad, sel_indices, out_grad);
  } else if (kAddTo == req[0]) {
    // TODO(sxjscience) We can use AddTakeGrad in the future.
    // However, the current implementation of AddTakeGrad is not so efficient.
    mxnet_op::Kernel<range_fwd, xpu>::Launch(s, sel_indices.shape_.Size(), 1, 0.0f,
      1.0f, kWriteTo, dummy_index.dptr_);
    mxnet::op::AddTakeGradLargeBatch(in_grad, sel_indices, dummy_index, out_grad);
  } else if (kNullOp == req[0]) {
    return;
  } else {
    LOG(FATAL) << "Not Implemented!";
  }
}
/*!
 * \brief Number of outputs the TopK node produces: one for the
 *        indices/mask return types, two otherwise (value + indices).
 */
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  const bool single_output = param.ret_typ == topk_enum::kReturnIndices ||
                             param.ret_typ == topk_enum::kReturnMask;
  return single_output ? static_cast<uint32_t>(1) : static_cast<uint32_t>(2);
}
/*!
 * \brief Number of outputs visible to the user: both values and indices for
 *        the "both" return type, a single array otherwise.
 */
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  return (param.ret_typ == topk_enum::kReturnBoth) ? static_cast<uint32_t>(2)
                                                   : static_cast<uint32_t>(1);
}
// Type inference for TopK: propagate a single element type across all
// inputs and outputs via the generic elemwise attribute helper.
inline bool TopKType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  return ElemwiseAttr<int, type_is_none, type_assign, true, type_string>(
    attrs, in_attrs, out_attrs, -1);
}
// Shape inference shared by TopK, Sort and ArgSort: validates the output
// count implied by ret_typ, then assigns the shape computed by
// ParseTopKParam to every output.
inline bool TopKShapeImpl(const TopKParam& param,
                          std::vector<TShape> *in_attrs,
                          std::vector<TShape> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    CHECK_EQ(out_attrs->size(), 1U);
  } else {
    CHECK_EQ(out_attrs->size(), 2U);
  }
  TShape& in_shape = (*in_attrs)[0];
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  TShape target_shape;
  ParseTopKParam(in_shape, param,
    &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
  } else {
    // Value and index outputs share the same shape.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
    SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
  }
  return true;
}
/*!
 * \brief Shape inference for TopK — forwards the parsed parameters to the
 *        shared implementation.
 */
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
                      std::vector<TShape> *in_attrs,
                      std::vector<TShape> *out_attrs) {
  return TopKShapeImpl(nnvm::get<TopKParam>(attrs.parsed), in_attrs, out_attrs);
}
/*!
 * \brief Shape inference for Sort — modeled as a full top-k (k == 0)
 *        returning values only.
 */
inline bool SortShape(const nnvm::NodeAttrs& attrs,
                      std::vector<TShape> *in_attrs,
                      std::vector<TShape> *out_attrs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam equivalent;
  equivalent.axis = param.axis;
  equivalent.k = 0;
  equivalent.ret_typ = topk_enum::kReturnValue;
  equivalent.is_ascend = param.is_ascend;
  return TopKShapeImpl(equivalent, in_attrs, out_attrs);
}
/*!
 * \brief Shape inference for ArgSort — modeled as a full top-k (k == 0)
 *        returning indices only.
 */
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
                         std::vector<TShape> *in_attrs,
                         std::vector<TShape> *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam equivalent;
  equivalent.axis = param.axis;
  equivalent.k = 0;
  equivalent.ret_typ = topk_enum::kReturnIndices;
  equivalent.is_ascend = param.is_ascend;
  return TopKShapeImpl(equivalent, in_attrs, out_attrs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
|
scheduled-clauseModificado3.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
// Maps an OpenMP schedule kind to a printable name.
// BUGFIX: the original left `ret` uninitialized when `type` matched none of
// the four standard kinds (e.g. an implementation-defined schedule), so the
// function could return garbage; it now returns "Unknown" in that case.
char* printEnum (omp_sched_t type) {
    char * ret = "Unknown";
    if (type == omp_sched_static)
        ret = "Static";
    else if (type == omp_sched_dynamic)
        ret = "Dynamic";
    else if (type == omp_sched_guided)
        ret = "Guided";
    else if (type == omp_sched_auto)
        ret = "Auto";
    return ret;
}
// Translates an int flag into its textual boolean name:
// 0 -> "False", 1 -> "True", any other value -> an error marker.
char* printBool (int b) {
    switch (b) {
        case 0:
            return (char *) "False";
        case 1:
            return (char *) "True";
        default:
            return (char *) "Error en printBool";
    }
}
// Reads a chunk size from argv, prints the ICV state before and after
// modifying it, then runs a dynamic-schedule parallel-for whose per-thread
// partial sums are observable through firstprivate/lastprivate(suma).
// FIXES: removed the never-used local `printed`; added an explicit
// `return 0;` for clarity.
int main(int argc, char **argv) {
    int i, n=16,chunk,a[n],suma=0, chunk_read;
    omp_sched_t sched_type;
    if(argc < 2) {
        fprintf(stderr,"\nFalta chunk \n");
        exit(-1);
    }
    chunk = atoi(argv[1]);
    for (i=0; i<n; i++)
        a[i] = i;
    omp_get_schedule(&sched_type, &chunk_read);
    printf("\n\tAntes de la modificación:\n dyn var: %s\n nthreads-var: %d\n run-shed-var: %s --- %d\n",
           printBool(omp_get_dynamic()), omp_get_max_threads(), printEnum(sched_type), chunk_read);
    // Change the ICVs, then read them back to show the effect.
    omp_set_dynamic(1);
    omp_set_num_threads(2);
    omp_set_schedule(omp_sched_auto, chunk);
    omp_get_schedule(&sched_type, &chunk_read);
    printf("\n\tDespués de la modificación:\n dyn var: %s\n nthreads-var: %d\n run-shed-var: %s --- %d\n\n",
           printBool(omp_get_dynamic()), omp_get_max_threads(), printEnum(sched_type), chunk_read);
    #pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(dynamic,chunk)
    for (i=0; i<n; i++)
    {
        suma = suma + a[i];
        printf(" thread %d suma a[%d]=%d suma=%d \n",
               omp_get_thread_num(),i,a[i],suma);
    }
    // lastprivate: suma holds the value from the thread that ran i == n-1.
    printf("Fuera de 'parallel for' suma=%d\n",suma);
    return 0;
}
|
timer.c | /*
* Copyright (c) 2011-2012, Los Alamos National Security, LLC.
* All rights Reserved.
*
* Copyright 2011-2012. Los Alamos National Security, LLC. This software was produced
* under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National
* Laboratory (LANL), which is operated by Los Alamos National Security, LLC
* for the U.S. Department of Energy. The U.S. Government has rights to use,
* reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS
* ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
* ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified
* to produce derivative works, such modified software should be clearly marked,
* so as not to confuse it with the version available from LANL.
*
* Additionally, redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Los Alamos National Security, LLC, Los Alamos
* National Laboratory, LANL, the U.S. Government, nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE LOS ALAMOS NATIONAL SECURITY, LLC AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
* NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL
* SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* CLAMR -- LA-CC-11-094
* This research code is being developed as part of the
* 2011 X Division Summer Workshop for the express purpose
* of a collaborative code for development of ideas in
* the implementation of AMR codes for Exascale platforms
*
* AMR implementation of the Wave code previously developed
* as a demonstration code for regular grids on Exascale platforms
* as part of the Supercomputing Challenge and Los Alamos
* National Laboratory
*
* Authors: Bob Robey XCP-2 brobey@lanl.gov
* Neal Davis davis68@lanl.gov, davis68@illinois.edu
* David Nicholaeff dnic@lanl.gov, mtrxknight@aol.com
* Dennis Trujillo dptrujillo@lanl.gov, dptru10@gmail.com
*
*/
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "timer.h"
// Records the current wall-clock time into *tstart_cpu.
// Inside an OpenMP parallel region only the master thread writes the
// timestamp, so a shared timeval is not raced on; outside a region (or in a
// non-OpenMP build) every caller simply records the time directly.
void cpu_timer_start(struct timeval *tstart_cpu){
#ifdef _OPENMP
   if (omp_in_parallel()) {
#pragma omp master
      gettimeofday(tstart_cpu, NULL);
   } else {
      gettimeofday(tstart_cpu, NULL);
   }
#else
   gettimeofday(tstart_cpu, NULL);
#endif
}
/* Compute the wall-clock seconds elapsed since tstart_cpu was recorded.
 * tv_usec may go negative in the subtraction; the signed seconds +
 * microseconds sum below still yields the correct elapsed time. */
static double elapsed_seconds(struct timeval tstart_cpu){
   struct timeval tstop_cpu, tresult;
   gettimeofday(&tstop_cpu, NULL);
   tresult.tv_sec = tstop_cpu.tv_sec - tstart_cpu.tv_sec;
   tresult.tv_usec = tstop_cpu.tv_usec - tstart_cpu.tv_usec;
   return (double)tresult.tv_sec + (double)tresult.tv_usec*1.0e-6;
}
/* Return the elapsed wall-clock time in seconds since tstart_cpu was
 * recorded by cpu_timer_start().
 *
 * Inside an OpenMP parallel region only the master thread reads the clock;
 * other threads return 0.0. Previously `result` was left uninitialized on
 * non-master threads, so they returned an indeterminate value (undefined
 * behavior); initializing it fixes that. */
double cpu_timer_stop(struct timeval tstart_cpu){
   double result = 0.0;   /* non-master threads report 0.0, not garbage */
#ifdef _OPENMP
   if ( omp_in_parallel() ) {
#pragma omp master
      {
         result = elapsed_seconds(tstart_cpu);
      }
   } else {
      result = elapsed_seconds(tstart_cpu);
   }
#else
   result = elapsed_seconds(tstart_cpu);
#endif
   return(result);
}
|
// knucleotide.c
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// Contributed by Jeremy Zerfas
// This controls the maximum length for each set of oligonucleotide frequencies
// and each oligonucleotide count output by this program.
#define MAXIMUM_OUTPUT_LENGTH 4096
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <htslib/khash.h>
// Define a custom hash function to use instead of khash's default hash
// function. This custom hash function uses a simpler bit shift and XOR which
// results in several percent faster performance compared to when khash's
// default hash function is used.
#define CUSTOM_HASH_FUNCTION(key) ((key) ^ (key)>>7)
// Instantiate a khash map named "oligonucleotide" with uint64_t keys (the
// 2-bits-per-nucleotide packed encoding) and uint32_t counts as values; the
// 1 selects map (key+value) mode rather than set mode.
KHASH_INIT(oligonucleotide, uint64_t, uint32_t, 1, CUSTOM_HASH_FUNCTION
, kh_int64_hash_equal)
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
// A (key, count) pair extracted from the hash table so the results can be
// sorted with qsort() before printing.
typedef struct {
uint64_t key;     // packed 2-bit-per-nucleotide oligonucleotide encoding
uint32_t value;   // number of occurrences observed
} element;
// Macro to convert a nucleotide character to a code. Note that upper and lower
// case ASCII letters only differ in the fifth bit from the right and we only
// need the three least significant bits to differentiate the letters 'A', 'C',
// 'G', and 'T'. Spaces in this array/string will never be used as long as
// characters other than 'A', 'C', 'G', and 'T' aren't used.
#define code_For_Nucleotide(nucleotide) (" \0 \1\3 \2"[nucleotide & 0x7])
// And one more macro to convert the codes back to nucleotide characters.
#define nucleotide_For_Code(code) ("ACGT"[code & 0x3])
// qsort() comparator for element records: orders by descending value
// (occurrence count) and breaks ties by ascending key. It never reports
// equality because hash-table keys are distinct, so the tie case always
// resolves through the key comparison.
static int element_Compare(const element * const left_Element
 , const element * const right_Element){
	const uint32_t left_Value=left_Element->value;
	const uint32_t right_Value=right_Element->value;
	// Different counts: the larger count sorts earlier.
	if(left_Value != right_Value)
		return left_Value < right_Value ? 1 : -1;
	// Equal counts: the smaller key sorts earlier.
	if(left_Element->key > right_Element->key)
		return 1;
	return -1;
}
// Generate frequencies for all oligonucleotides in polynucleotide that are of
// desired_Length_For_Oligonucleotides and then save it to output.
//
// polynucleotide is assumed to already hold 2-bit nucleotide codes (0-3), one
// per byte, as produced via code_For_Nucleotide() in main() — TODO confirm
// with any new callers. output must provide at least MAXIMUM_OUTPUT_LENGTH
// bytes; it receives one "SEQ pct\n" line per distinct oligonucleotide,
// sorted by descending frequency then ascending key.
static void generate_Frequencies_For_Desired_Length_Oligonucleotides(
const char * const polynucleotide, const intnative_t polynucleotide_Length
, const intnative_t desired_Length_For_Oligonucleotides, char * const output){
khash_t(oligonucleotide) * hash_Table=kh_init(oligonucleotide);
// key is a rolling window over the sequence: each step shifts in the next
// 2-bit code and mask drops codes older than the desired length.
uint64_t key=0;
const uint64_t mask=((uint64_t)1<<2*desired_Length_For_Oligonucleotides)-1;
// For the first several nucleotides we only need to append them to key in
// preparation for the insertion of complete oligonucleotides to hash_Table.
for(intnative_t i=0; i<desired_Length_For_Oligonucleotides-1; i++)
key=(key<<2 & mask) | polynucleotide[i];
// Add all the complete oligonucleotides of
// desired_Length_For_Oligonucleotides to hash_Table and update the count
// for each oligonucleotide.
for(intnative_t i=desired_Length_For_Oligonucleotides-1
; i<polynucleotide_Length; i++){
key=(key<<2 & mask) | polynucleotide[i];
int element_Was_Unused;
const khiter_t k=kh_put(oligonucleotide, hash_Table, key
, &element_Was_Unused);
// If the element_Was_Unused, then initialize the count to 1, otherwise
// increment the count.
if(element_Was_Unused)
kh_value(hash_Table, k)=1;
else
kh_value(hash_Table, k)++;
}
// Create an array of elements from hash_Table.
// NOTE(review): the malloc() result is not checked; a failed allocation
// would crash in the kh_foreach below.
intnative_t elements_Array_Size=kh_size(hash_Table), i=0;
element * elements_Array=malloc(elements_Array_Size*sizeof(element));
uint32_t value;
kh_foreach(hash_Table, key, value
, elements_Array[i++]=((element){key, value}));
kh_destroy(oligonucleotide, hash_Table);
// Sort elements_Array.
// NOTE(review): casting a comparator taking element* to one taking void*
// is common practice but not strictly conforming C; works on the usual ABIs.
qsort(elements_Array, elements_Array_Size, sizeof(element)
, (int (*)(const void *, const void *)) element_Compare);
// Print the frequencies for each oligonucleotide.
for(intnative_t output_Position=0, i=0; i<elements_Array_Size; i++){
// Convert the key for the oligonucleotide to a string (variable-length
// array), decoding the packed key from the least significant bits outward.
char oligonucleotide[desired_Length_For_Oligonucleotides+1];
for(intnative_t j=desired_Length_For_Oligonucleotides-1; j>-1; j--){
oligonucleotide[j]=nucleotide_For_Code(elements_Array[i].key);
elements_Array[i].key>>=2;
}
oligonucleotide[desired_Length_For_Oligonucleotides]='\0';
// Output the frequency for oligonucleotide to output.
// NOTE(review): assumes cumulative output stays below
// MAXIMUM_OUTPUT_LENGTH (true for lengths 1 and 2 used here); if it ever
// exceeded it, the size argument would go negative — verify before reusing
// with longer oligonucleotide lengths.
output_Position+=snprintf(output+output_Position
, MAXIMUM_OUTPUT_LENGTH-output_Position, "%s %.3f\n", oligonucleotide
, 100.0f*elements_Array[i].value
/(polynucleotide_Length-desired_Length_For_Oligonucleotides+1));
}
free(elements_Array);
}
// Generate a count for the number of times oligonucleotide appears in
// polynucleotide and then save it to output.
//
// polynucleotide is assumed to already hold 2-bit nucleotide codes (0-3), one
// per byte (as produced in main()); oligonucleotide is a plain ASCII string
// and is encoded here with code_For_Nucleotide(). output must provide at
// least MAXIMUM_OUTPUT_LENGTH bytes and receives "<count>\t<oligo>".
//
// This previously built a hash table counting EVERY oligonucleotide of the
// target's length and then looked up a single key — O(n) memory and a hash
// operation per position. A rolling comparison against the target key yields
// the identical count with O(1) extra memory and no hashing.
static void generate_Count_For_Oligonucleotide(
const char * const polynucleotide, const intnative_t polynucleotide_Length
, const char * const oligonucleotide, char * const output){
	const intnative_t oligonucleotide_Length=strlen(oligonucleotide);

	// Encode the target into the same 2-bits-per-nucleotide packed key
	// format used for the rolling window below.
	uint64_t target_Key=0;
	for(intnative_t i=0; i<oligonucleotide_Length; i++)
		target_Key=(target_Key<<2) | code_For_Nucleotide(oligonucleotide[i]);

	// Slide a window of oligonucleotide_Length codes across polynucleotide,
	// updating its packed key incrementally, and count positions whose key
	// equals the target key.
	const uint64_t mask=((uint64_t)1<<2*oligonucleotide_Length)-1;
	uint64_t key=0;
	uintmax_t count=0;
	// Prime the window with the first oligonucleotide_Length-1 codes; no
	// complete window exists yet, so nothing is counted here.
	for(intnative_t i=0; i<oligonucleotide_Length-1; i++)
		key=(key<<2 & mask) | polynucleotide[i];
	for(intnative_t i=oligonucleotide_Length-1; i<polynucleotide_Length; i++){
		key=(key<<2 & mask) | polynucleotide[i];
		if(key==target_Key) count++;
	}

	// Output the count for oligonucleotide to output.
	snprintf(output, MAXIMUM_OUTPUT_LENGTH, "%ju\t%s", count, oligonucleotide);
}
// Read the ">THREE" polynucleotide from stdin, encode it as 2-bit codes, and
// print nucleotide/dinucleotide frequency tables plus counts of five specific
// oligonucleotides, per the Benchmarks Game k-nucleotide specification.
int main(){
char buffer[4096];
// Find the start of the third polynucleotide.
// Skip input lines until one begins with ">THREE" (sizeof-1 drops the
// terminating NUL from the comparison length).
while(fgets(buffer, sizeof(buffer), stdin) && memcmp(">THREE", buffer
, sizeof(">THREE")-1));
// Start with 1 MB of storage for reading in the polynucleotide and grow
// geometrically.
intnative_t polynucleotide_Capacity=1048576;
intnative_t polynucleotide_Length=0;
char * polynucleotide=malloc(polynucleotide_Capacity);
// Start reading and encoding the third polynucleotide.
// Each nucleotide letter is converted in place to its 2-bit code (0-3);
// reading stops at EOF or at the next FASTA header line ('>').
while(fgets(buffer, sizeof(buffer), stdin) && buffer[0]!='>'){
for(intnative_t i=0; buffer[i]!='\0'; i++)
if(buffer[i]!='\n')
polynucleotide[polynucleotide_Length++]
=code_For_Nucleotide(buffer[i]);
// Make sure we still have enough memory allocated for any potential
// nucleotides in the next line.
// NOTE(review): realloc() results here and below are not checked; a
// failed reallocation would lose the buffer and crash on the next write.
if(polynucleotide_Capacity-polynucleotide_Length<sizeof(buffer))
polynucleotide=realloc(polynucleotide, polynucleotide_Capacity*=2);
}
// Free up any leftover memory.
polynucleotide=realloc(polynucleotide, polynucleotide_Length);
// One fixed-size output slot per result so the parallel sections below can
// write independently; slots are printed in order afterwards.
char output_Buffer[7][MAXIMUM_OUTPUT_LENGTH];
// Do the following functions in parallel.
// The longest-running tasks (longest oligonucleotides) are listed first so
// they start earliest, improving load balance across the sections.
#pragma omp parallel sections
{
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide
, polynucleotide_Length, "GGTATTTTAATTTATAGT", output_Buffer[6]);
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide
, polynucleotide_Length, "GGTATTTTAATT", output_Buffer[5]);
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide
, polynucleotide_Length, "GGTATT", output_Buffer[4]);
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide
, polynucleotide_Length, "GGTA", output_Buffer[3]);
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide
, polynucleotide_Length, "GGT", output_Buffer[2]);
#pragma omp section
generate_Frequencies_For_Desired_Length_Oligonucleotides(polynucleotide
, polynucleotide_Length, 2, output_Buffer[1]);
#pragma omp section
generate_Frequencies_For_Desired_Length_Oligonucleotides(polynucleotide
, polynucleotide_Length, 1, output_Buffer[0]);
}
// Output the results to stdout.
for(intnative_t i=0; i<7; printf("%s\n", output_Buffer[i++]));
free(polynucleotide);
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.