source | c |
|---|---|
GB_unaryop__minv_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_uint8
// op(A') function: GB_tran__minv_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
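// For reference, GB_CAST_OP (p, p) expands (roughly) to:
//      uint8_t aij = Ax [p] ;
//      uint8_t x = (uint8_t) aij ;
//      Cx [p] = GB_IMINV_UNSIGNED (x, 8) ;
// i.e. each entry of C is the GraphBLAS integer reciprocal (MINV) of the
// corresponding entry of A, computed in uint8_t arithmetic.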
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint8_uint8
(
uint8_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint8_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
STFT.h | #ifndef _H_STFT_
#define _H_STFT_
#include "Ooura_FFT.h"
#include "HannWindow.h"
#include "PostProcessor.h"
class STFT{
private :
const double MATLAB_scale = 32768;
HannWindow *hw;
Ooura_FFT *fft;
PostProcessor *ap;
int channels;
int frame_size;
int shift_size;
int ol; // overlap length: frame_size - shift_size
double**buf;
public :
inline STFT(int channels,int frame,int shift);
inline ~STFT();
/* STFT of interleaved input from a device or file
in : raw interleaved buffer from wav or mic
length : number of valid samples, at most shift_size * channels
(may be smaller for a partially filled final block)
out : STFT result [channels][frame_size + 2] (half spectrum as interleaved complex)
*/
inline void stft(short*in,int length,double**out);
inline void istft(double**in,short*out);
inline void stft(short*in,int length,double**out,int target_channels);
/* 2-D raw input STFT
in : [channels][shift_size] raw data in double
out : [channels][frame_size+2]
*/
inline void stft(double** in, double** out);
inline void stft(double** in, double** out,int target_channels);
/* Single-Channel STFT
in : 1 x shift
out : 1 x frame_size + 2 (half FFT in complex)
*/
inline void stft(short* in, double* out);
inline void stft(double* in, double* out);
/* Single-Channel ISTFT
in : 1 x frame_size + 2 (half FFT in complex)
out : 1 x shift_size */
inline void istft(double* in, short* out);
// for a 3-channel wav supplied as three separate per-channel buffers
inline void stft(short* in_1, short* in_2, short* in_3, int length, double** out);
};
STFT::STFT(int channels_,int frame_,int shift_){
int i;
channels = channels_;
frame_size = frame_;
shift_size = shift_;
ol = frame_size - shift_size;
hw = new HannWindow(frame_size, shift_size);
fft= new Ooura_FFT(frame_size, channels);
ap = new PostProcessor(frame_size, shift_size, channels);
buf = new double*[channels];
for(i=0;i<channels;i++){
buf[i] = new double[frame_size];
memset(buf[i],0,sizeof(double)*frame_size);
}
}
STFT::~STFT(){
int i;
delete hw;
delete fft;
delete ap;
for(i=0;i<channels;i++)
delete[] buf[i];
delete[] buf;
}
void STFT::stft(short*in,int length,double**out){
int i,j;
/*** Shift & Copy ***/
for (j = 0; j < channels; j++) {
for (i = 0; i < ol; i++) {
buf[j][i] = buf[j][i + shift_size];
}
}
//// EOF: partially filled final block
if(length!=shift_size*channels){
length = length/channels;
for (i = 0; i < length; i++) {
for (j = 0; j < channels; j++)
buf[j][i + ol]
= (double)(in[i * channels+ j]);
}
for (i = length; i < shift_size; i++) {
for (j = 0; j < channels; j++)
buf[j][i + ol] = 0;
}
//// continue
}else{
for (i = 0; i < shift_size; i++) {
for (j = 0; j < channels; j++){
buf[j][i + ol]
= (double)(in[i * channels+ j]);
}
}
}
/*** Copy input -> hann_input buffer ***/
for (i = 0; i < channels; i++)
memcpy(out[i], buf[i], sizeof(double) * frame_size);
// scale 16-bit PCM to [-1, 1), matching MATLAB's wav convention
for (i = 0; i < channels; i++)
for (j = 0; j < frame_size; j++)
out[i][j] /= MATLAB_scale;
/*** Window ***/
hw->Process(out, channels);
/*** FFT ***/
fft->FFT(out);
}
void STFT::stft(short* in, int length, double** out, int target_channels) {
int tmp = channels;
channels = target_channels;
stft(in, length, out);
channels = tmp;
}
void STFT::istft(double**in,short*out){
/*** iFFT ***/
fft->iFFT(in);
/*** Window ***/
hw->Process(in, channels);
/*** Output ***/
memcpy(out,ap->Overlap(in),sizeof(short)*shift_size*channels);
}
// Single-Channel double
void STFT::stft(short* in, double* out){
int i;
/*** Shift & Copy ***/
for (i = 0; i < ol; i++) {
buf[0][i] = buf[0][i + shift_size];
}
for (i = 0; i < shift_size; i++)
buf[0][ol + i] = static_cast<double>(in[i]);
memcpy(out, buf[0], sizeof(double) * frame_size);
/*** Window ***/
hw->Process(out);
/*** FFT ***/
fft->FFT(out);
}
void STFT::stft(double* in, double* out) {
int i;
/*** Shift & Copy ***/
for (i = 0; i < ol; i++) {
buf[0][i] = buf[0][i + shift_size];
}
for (i = 0; i < shift_size; i++)
buf[0][ol + i] = in[i];
memcpy(out, buf[0], sizeof(double) * frame_size);
/*** Window ***/
hw->Process(out);
/*** FFT ***/
fft->FFT(out);
}
void STFT::stft(double** in, double** out) {
/*** Shift & Copy ***/
#pragma omp parallel for
for (int j = 0; j < channels; j++) {
for (int i = 0; i < ol; i++) {
buf[j][i] = buf[j][i + shift_size];
}
for (int i = 0; i < shift_size; i++){
buf[j][ol + i] = in[j][i];
}
// copy once per channel, after the whole shift is in place
memcpy(out[j], buf[j], sizeof(double) * frame_size);
}
// scale to [-1, 1), matching MATLAB's wav convention
for (int i = 0; i < channels; i++)
for (int j = 0; j < frame_size; j++){
out[i][j] /= MATLAB_scale;
}
/*** Window ***/
hw->Process(out,channels);
/*** FFT ***/
fft->FFT(out);
}
void STFT::stft(double** in, double** out,int target_channels){
/*** Shift & Copy ***/
#pragma omp parallel for
for (int j = 0; j < target_channels; j++) {
for (int i = 0; i < ol; i++) {
buf[j][i] = buf[j][i + shift_size];
}
for (int i = 0; i < shift_size; i++){
buf[j][ol + i] = in[j][i];
}
// copy once per channel, after the whole shift is in place
memcpy(out[j], buf[j], sizeof(double) * frame_size);
}
// scale to [-1, 1), matching MATLAB's wav convention
for (int i = 0; i < target_channels; i++)
for (int j = 0; j < frame_size; j++){
out[i][j] /= MATLAB_scale;
}
/*** Window ***/
hw->Process(out,target_channels);
/*** FFT ***/
fft->FFT(out,target_channels);
}
// for a 3-channel wav supplied as three separate per-channel buffers
void STFT:: stft(short* in_1, short* in_2, short* in_3, int length, double** out){
int i, j;
short* in[3];
in[0] = in_1;
in[1] = in_2;
in[2] = in_3;
/*** Shift & Copy ***/
for (j = 0; j < channels; j++) {
for (i = 0; i < ol; i++) {
buf[j][i] = buf[j][i + shift_size];
}
}
//// EOF: partially filled final block
if (length != shift_size * channels) {
length = length / channels;
for (i = 0; i < length; i++) {
for (j = 0; j < channels; j++)
buf[j][i + ol]
= (double)(in[j][i]);
}
for (i = length; i < shift_size; i++) {
for (j = 0; j < channels; j++)
buf[j][i + ol] = 0;
}
//// continue
}
else {
for (i = 0; i < shift_size; i++) {
for (j = 0; j < channels; j++) {
buf[j][i + ol]
= (double)(in[j][i]);
}
}
}
/*** Copy input -> hann_input buffer ***/
for (i = 0; i < channels; i++)
memcpy(out[i], buf[i], sizeof(double) * frame_size);
// scale 16-bit PCM to [-1, 1), matching MATLAB's wav convention
for (i = 0; i < channels; i++)
for (j = 0; j < frame_size; j++)
out[i][j] /= MATLAB_scale;
/*** Window ***/
hw->Process(out, channels);
/*** FFT ***/
fft->FFT(out);
}
void STFT::istft(double* in, short* out) {
/*** iFFT ***/
fft->iFFT(in);
/*** Window ***/
hw->Process(in);
/*** Output ***/
memcpy(out,ap->Overlap(in),sizeof(short)*shift_size);
}
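/* Usage sketch (illustrative only; variable names are hypothetical and
sizes follow the comments above):
STFT proc(2, 512, 128); // 2 channels, frame 512, shift 128
double* spec[2] = { new double[514], new double[514] }; // frame_size + 2
short in[128 * 2], out[128 * 2]; // interleaved shift-size blocks
proc.stft(in, 128 * 2, spec); // forward transform
// ... process spec here ...
proc.istft(spec, out); // inverse transform + overlap-add
*/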
#endif
|
HardTanh.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/HardTanh.c"
#else
void THNN_(HardTanh_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
accreal min_val_,
accreal max_val_,
bool inplace)
{
scalar_t min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
scalar_t max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);
if (inplace)
THTensor_(set)(output, input);
else
THTensor_(resizeAs)(output, input);
if (THTensor_nDimensionLegacyAll(input) == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
{
if (inplace)
{
TH_TENSOR_APPLY(scalar_t, input,
if (*input_data < min_val)
*input_data = min_val;
else if (*input_data > max_val)
*input_data = max_val;
);
}
else
{
TH_TENSOR_APPLY2(scalar_t, output, scalar_t, input,
if (*input_data < min_val)
*output_data = min_val;
else if (*input_data > max_val)
*output_data = max_val;
else
*output_data = *input_data;
);
}
}
else
{
scalar_t* ptr_input = input->data<scalar_t>();
scalar_t* ptr_output = output->data<scalar_t>();
ptrdiff_t i;
ptrdiff_t n = THTensor_(nElement)(input);
if (inplace)
#pragma omp parallel for private(i)
for (i = 0; i < n; i++)
{
if (ptr_input[i] < min_val)
ptr_input[i] = min_val;
else if (ptr_input[i] > max_val)
ptr_input[i] = max_val;
}
else
#pragma omp parallel for private(i)
for (i = 0; i < n; i++)
{
if (ptr_input[i] < min_val)
ptr_output[i] = min_val;
else if (ptr_input[i] <= max_val)
ptr_output[i] = ptr_input[i];
else
ptr_output[i] = max_val;
}
}
}
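/* Both branches above compute the same clamp:
output[i] = min(max(input[i], min_val), max_val)
The backward pass below passes gradOutput through where the input lies
strictly inside (min_val, max_val) and zeroes it elsewhere. */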
void THNN_(HardTanh_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
accreal min_val_,
accreal max_val_,
bool inplace)
{
scalar_t min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
scalar_t max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);
THNN_CHECK_NELEMENT(input, gradOutput);
if (inplace)
THTensor_(set)(gradInput, gradOutput);
else
THTensor_(resizeAs)(gradInput, input);
if (THTensor_nDimensionLegacyAll(input) == 1 ||
!THTensor_(isContiguous)(input) ||
!THTensor_(isContiguous)(gradOutput) ||
!THTensor_(isContiguous)(gradInput))
{
if (inplace)
{
TH_TENSOR_APPLY2(scalar_t, gradOutput, scalar_t, input,
if (*input_data <= min_val || *input_data >= max_val)
*gradOutput_data = 0;
);
}
else
TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input,
if (*input_data <= min_val || *input_data >= max_val)
*gradInput_data = 0;
else
*gradInput_data = *gradOutput_data;
);
}
else
{
scalar_t* ptr_gradOutput = gradOutput->data<scalar_t>();
scalar_t* ptr_gradInput = gradInput->data<scalar_t>();
scalar_t* ptr_input = input->data<scalar_t>();
ptrdiff_t i;
ptrdiff_t n = THTensor_(nElement)(input);
if (inplace)
#pragma omp parallel for private(i)
for (i = 0; i < n; i++)
{
if (ptr_input[i] <= min_val || ptr_input[i] >= max_val)
ptr_gradInput[i] = 0;
}
else
#pragma omp parallel for private(i)
for (i = 0; i < n; i++)
{
if (ptr_input[i] <= min_val || ptr_input[i] >= max_val)
ptr_gradInput[i] = 0;
else
ptr_gradInput[i] = ptr_gradOutput[i];
}
}
}
#endif
|
texture.c | /*
* Texture Manager
*/
#include "zgl.h"
static GLTexture* find_texture(GLint h) {
GLTexture* t;
GLContext* c = gl_get_context();
t = c->shared_state.texture_hash_table[h & TEXTURE_HASH_TABLE_MASK];
while (t != NULL) {
if (t->handle == h)
return t;
t = t->next;
}
return NULL;
}
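/* Handles are hashed into TEXTURE_HASH_TABLE_SIZE buckets; each bucket is
a doubly linked list (see alloc_texture/free_texture below), so a lookup
costs O(chain length). */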
GLboolean glAreTexturesResident(GLsizei n, const GLuint* textures, GLboolean* residences) {
#define RETVAL GL_FALSE
GLboolean retval = GL_TRUE;
GLint i;
#include "error_check_no_context.h"
for (i = 0; i < n; i++)
if (find_texture(textures[i])) {
residences[i] = GL_TRUE;
} else {
residences[i] = GL_FALSE;
retval = GL_FALSE;
}
return retval;
}
GLboolean glIsTexture(GLuint texture) {
GLContext* c = gl_get_context();
#define RETVAL GL_FALSE
#include "error_check.h"
if (find_texture(texture))
return GL_TRUE;
return GL_FALSE;
}
void* glGetTexturePixmap(GLint text, GLint level, GLint* xsize, GLint* ysize) {
GLTexture* tex;
GLContext* c = gl_get_context();
#if TGL_FEATURE_ERROR_CHECK == 1
if (!(text >= 0 && level < MAX_TEXTURE_LEVELS))
#define ERROR_FLAG GL_INVALID_ENUM
#define RETVAL NULL
#include "error_check.h"
#else
/*assert(text >= 0 && level < MAX_TEXTURE_LEVELS);*/
#endif
tex = find_texture(text);
if (!tex)
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_INVALID_ENUM
#define RETVAL NULL
#include "error_check.h"
#else
return NULL;
#endif
*xsize = tex->images[level].xsize;
*ysize = tex->images[level].ysize;
return tex->images[level].pixmap;
}
static void free_texture(GLContext* c, GLint h) {
GLTexture *t, **ht;
t = find_texture(h);
if (t->prev == NULL) {
ht = &c->shared_state.texture_hash_table[t->handle & TEXTURE_HASH_TABLE_MASK];
*ht = t->next;
} else {
t->prev->next = t->next;
}
if (t->next != NULL)
t->next->prev = t->prev;
gl_free(t);
}
GLTexture* alloc_texture(GLint h) {
GLContext* c = gl_get_context();
GLTexture *t, **ht;
#define RETVAL NULL
#include "error_check.h"
t = gl_zalloc(sizeof(GLTexture));
if (!t)
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_OUT_OF_MEMORY
#define RETVAL NULL
#include "error_check.h"
#else
gl_fatal_error("GL_OUT_OF_MEMORY");
#endif
ht = &c->shared_state.texture_hash_table[h & TEXTURE_HASH_TABLE_MASK];
t->next = *ht;
t->prev = NULL;
if (t->next != NULL)
t->next->prev = t;
*ht = t;
t->handle = h;
return t;
}
void glInitTextures() {
/* textures */
GLContext* c = gl_get_context();
c->texture_2d_enabled = 0;
c->current_texture = find_texture(0);
}
void glGenTextures(GLint n, GLuint* textures) {
GLContext* c = gl_get_context();
GLint max, i;
GLTexture* t;
#include "error_check.h"
max = 0;
for (i = 0; i < TEXTURE_HASH_TABLE_SIZE; i++) {
t = c->shared_state.texture_hash_table[i];
while (t != NULL) {
if (t->handle > max)
max = t->handle;
t = t->next;
}
}
for (i = 0; i < n; i++) {
textures[i] = max + i + 1; /* MARK: How texture handles are created.*/
}
}
void glDeleteTextures(GLint n, const GLuint* textures) {
GLint i;
GLTexture* t;
GLContext* c = gl_get_context();
#include "error_check.h"
for (i = 0; i < n; i++) {
t = find_texture(textures[i]);
if (t != NULL) {
if (t == c->current_texture) {
glBindTexture(GL_TEXTURE_2D, 0);
#include "error_check.h"
}
free_texture(c, textures[i]);
}
}
}
void glopBindTexture(GLParam* p) {
GLint target = p[1].i;
GLint texture = p[2].i;
GLTexture* t;
GLContext* c = gl_get_context();
#if TGL_FEATURE_ERROR_CHECK == 1
if (!(target == GL_TEXTURE_2D && texture >= 0))
#define ERROR_FLAG GL_INVALID_ENUM
#include "error_check.h"
#else
#endif
t = find_texture(texture);
if (t == NULL) {
t = alloc_texture(texture);
#include "error_check.h"
}
if (t == NULL) {
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_OUT_OF_MEMORY
#include "error_check.h"
#else
gl_fatal_error("GL_OUT_OF_MEMORY");
#endif
}
c->current_texture = t;
}
void glCopyTexImage2D(GLenum target,
GLint level,
GLenum internalformat,
GLint x,
GLint y,
GLsizei width,
GLsizei height, GLint border) {
GLParam p[9];
#include "error_check_no_context.h"
p[0].op = OP_CopyTexImage2D;
p[1].i = target;
p[2].i = level;
p[3].i = internalformat;
p[4].i = x;
p[5].i = y;
p[6].i = width;
p[7].i = height;
p[8].i = border;
gl_add_op(p);
}
void glopCopyTexImage2D(GLParam* p) {
GLImage* im;
PIXEL* data;
GLint i, j;
GLint target = p[1].i;
GLint level = p[2].i;
GLint x = p[4].i;
GLint y = p[5].i;
GLsizei w = p[6].i;
GLsizei h = p[7].i;
GLint border = p[8].i;
GLContext* c = gl_get_context();
y -= h;
if (c->readbuffer != GL_FRONT || c->current_texture == NULL || target != GL_TEXTURE_2D || border != 0 ||
w != TGL_FEATURE_TEXTURE_DIM || /*TODO Implement image interp*/
h != TGL_FEATURE_TEXTURE_DIM) {
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_INVALID_OPERATION
#include "error_check.h"
#else
return;
#endif
}
im = &c->current_texture->images[level];
data = c->current_texture->images[level].pixmap;
im->xsize = TGL_FEATURE_TEXTURE_DIM;
im->ysize = TGL_FEATURE_TEXTURE_DIM;
/* TODO implement the scaling and stuff that the GL spec says it should have.*/
#if TGL_FEATURE_MULTITHREADED_COPY_TEXIMAGE_2D == 1
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
for (j = 0; j < h; j++)
for (i = 0; i < w; i++) {
data[i + j * w] = c->zb->pbuf[((i + x) % (c->zb->xsize)) + ((j + y) % (c->zb->ysize)) * (c->zb->xsize)];
}
#else
for (j = 0; j < h; j++)
for (i = 0; i < w; i++) {
data[i + j * w] = c->zb->pbuf[((i + x) % (c->zb->xsize)) + ((j + y) % (c->zb->ysize)) * (c->zb->xsize)];
}
#endif
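/* Note: source pixels are read from the front buffer with modulo
wrap-around in x and y, so a copy that crosses the framebuffer edge
wraps around instead of clamping. */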
}
void glopTexImage1D(GLParam* p) {
GLint target = p[1].i;
GLint level = p[2].i;
GLint components = p[3].i;
GLint width = p[4].i;
/* GLint height = p[5].i;*/
GLint height = 1;
GLint border = p[5].i;
GLint format = p[6].i;
GLint type = p[7].i;
void* pixels = p[8].p;
GLImage* im;
GLubyte* pixels1;
GLint do_free=0;
GLContext* c = gl_get_context();
{
#if TGL_FEATURE_ERROR_CHECK == 1
if (!(c->current_texture != NULL && target == GL_TEXTURE_1D && level == 0 && components == 3 && border == 0 && format == GL_RGB &&
type == GL_UNSIGNED_BYTE))
#define ERROR_FLAG GL_INVALID_ENUM
#include "error_check.h"
#else
if (!(c->current_texture != NULL && target == GL_TEXTURE_1D && level == 0 && components == 3 && border == 0 && format == GL_RGB &&
type == GL_UNSIGNED_BYTE))
gl_fatal_error("glTexImage2D: combination of parameters not handled!!");
#endif
}
if (width != TGL_FEATURE_TEXTURE_DIM || height != TGL_FEATURE_TEXTURE_DIM) {
pixels1 = gl_malloc(TGL_FEATURE_TEXTURE_DIM * TGL_FEATURE_TEXTURE_DIM * 3); /* GUARDED*/
if (pixels1 == NULL) {
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_OUT_OF_MEMORY
#include "error_check.h"
#else
gl_fatal_error("GL_OUT_OF_MEMORY");
#endif
}
/* no interpolation is done here, to preserve the original image's aliasing */
gl_resizeImageNoInterpolate(pixels1, TGL_FEATURE_TEXTURE_DIM, TGL_FEATURE_TEXTURE_DIM, pixels, width, height);
do_free = 1;
width = TGL_FEATURE_TEXTURE_DIM;
height = TGL_FEATURE_TEXTURE_DIM;
} else {
pixels1 = pixels;
}
im = &c->current_texture->images[level];
im->xsize = width;
im->ysize = height;
#if TGL_FEATURE_RENDER_BITS == 32
gl_convertRGB_to_8A8R8G8B(im->pixmap, pixels1, width, height);
#elif TGL_FEATURE_RENDER_BITS == 16
gl_convertRGB_to_5R6G5B(im->pixmap, pixels1, width, height);
#else
#error bad TGL_FEATURE_RENDER_BITS
#endif
if (do_free)
gl_free(pixels1);
}
void glopTexImage2D(GLParam* p) {
GLint target = p[1].i;
GLint level = p[2].i;
GLint components = p[3].i;
GLint width = p[4].i;
GLint height = p[5].i;
GLint border = p[6].i;
GLint format = p[7].i;
GLint type = p[8].i;
void* pixels = p[9].p;
GLImage* im;
GLubyte* pixels1;
GLint do_free=0;
GLContext* c = gl_get_context();
{
#if TGL_FEATURE_ERROR_CHECK == 1
if (!(c->current_texture != NULL && target == GL_TEXTURE_2D && level == 0 && components == 3 && border == 0 && format == GL_RGB &&
type == GL_UNSIGNED_BYTE))
#define ERROR_FLAG GL_INVALID_ENUM
#include "error_check.h"
#else
if (!(c->current_texture != NULL && target == GL_TEXTURE_2D && level == 0 && components == 3 && border == 0 && format == GL_RGB &&
type == GL_UNSIGNED_BYTE))
gl_fatal_error("glTexImage2D: combination of parameters not handled!!");
#endif
}
if (width != TGL_FEATURE_TEXTURE_DIM || height != TGL_FEATURE_TEXTURE_DIM) {
pixels1 = gl_malloc(TGL_FEATURE_TEXTURE_DIM * TGL_FEATURE_TEXTURE_DIM * 3); /* GUARDED*/
if (pixels1 == NULL) {
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_OUT_OF_MEMORY
#include "error_check.h"
#else
gl_fatal_error("GL_OUT_OF_MEMORY");
#endif
}
/* no interpolation is done here, to preserve the original image's aliasing */
gl_resizeImageNoInterpolate(pixels1, TGL_FEATURE_TEXTURE_DIM, TGL_FEATURE_TEXTURE_DIM, pixels, width, height);
do_free = 1;
width = TGL_FEATURE_TEXTURE_DIM;
height = TGL_FEATURE_TEXTURE_DIM;
} else {
pixels1 = pixels;
}
im = &c->current_texture->images[level];
im->xsize = width;
im->ysize = height;
#if TGL_FEATURE_RENDER_BITS == 32
gl_convertRGB_to_8A8R8G8B(im->pixmap, pixels1, width, height);
#elif TGL_FEATURE_RENDER_BITS == 16
gl_convertRGB_to_5R6G5B(im->pixmap, pixels1, width, height);
#else
#error Bad TGL_FEATURE_RENDER_BITS
#endif
if (do_free)
gl_free(pixels1);
}
/* TODO: not all tests are done */
/*
void glopTexEnv(GLContext* c, GLParam* p) {
GLint target = p[1].i;
GLint pname = p[2].i;
GLint param = p[3].i;
if (target != GL_TEXTURE_ENV) {
error:
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_INVALID_ENUM
#include "error_check.h"
#else
gl_fatal_error("glTexParameter: unsupported option");
#endif
}
if (pname != GL_TEXTURE_ENV_MODE)
goto error;
if (param != GL_DECAL)
goto error;
}
*/
/* TODO: not all tests are done */
/*
void glopTexParameter(GLContext* c, GLParam* p) {
GLint target = p[1].i;
GLint pname = p[2].i;
GLint param = p[3].i;
if (target != GL_TEXTURE_2D &&
target != GL_TEXTURE_1D) {
error:
tgl_warning("glTexParameter: unsupported option");
return;
}
switch (pname) {
case GL_TEXTURE_WRAP_S:
case GL_TEXTURE_WRAP_T:
if (param != GL_REPEAT)
goto error;
break;
}
}
*/
/*
void glopPixelStore(GLContext* c, GLParam* p) {
GLint pname = p[1].i;
GLint param = p[2].i;
if (pname != GL_UNPACK_ALIGNMENT || param != 1) {
gl_fatal_error("glPixelStore: unsupported option");
}
}
*/
|
GB_binop__bclr_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bclr_int32
// A.*B function (eWiseMult): GB_AemultB__bclr_int32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bclr_int32
// C+=b function (dense accum): GB_Cdense_accumb__bclr_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_int32
// C=scalar+B GB_bind1st__bclr_int32
// C=scalar+B' GB_bind1st_tran__bclr_int32
// C=A+scalar GB_bind2nd__bclr_int32
// C=A'+scalar GB_bind2nd_tran__bclr_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int32_t, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITCLR (x, y, int32_t, 32) ;
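// For reference, GB_BINOP (GB_CX (p), aij, bij) expands to
//      Cx [p] = GB_BITCLR (aij, bij, int32_t, 32) ;
// i.e. cij is aij with the bit position selected by bij cleared.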
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT32 || GxB_NO_BCLR_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bclr_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bclr_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bclr_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bclr_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bclr_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bclr_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t bij = Bx [p] ;
Cx [p] = GB_BITCLR (x, bij, int32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bclr_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
Cx [p] = GB_BITCLR (aij, y, int32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (x, aij, int32_t, 32) ; \
}
GrB_Info GB_bind1st_tran__bclr_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (aij, y, int32_t, 32) ; \
}
GrB_Info GB_bind2nd_tran__bclr_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_hyper_prune.c | //------------------------------------------------------------------------------
// GB_hyper_prune: remove empty vectors from a hypersparse Ap, Ah list
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Removes empty vectors from a hypersparse list. On input, *Ap and *Ah are
// assumed to be NULL. The input arrays Ap_old and Ah_old are not modified,
// and thus can be shallow content from another matrix. New hyperlists Ap and
// Ah are allocated, for nvec vectors, all nonempty.
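// For example, with nvec_old = 4, Ap_old = [0,0,2,2,5] (vectors 0 and 2
// are empty) and Ah_old = [0,3,5,7], the output is nvec = 2,
// Ap = [0,2,5], and Ah = [3,7].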
#include "GB.h"
GrB_Info GB_hyper_prune
(
// output, not allocated on input:
int64_t *restrict *p_Ap, size_t *p_Ap_size, // size nvec+1
int64_t *restrict *p_Ah, size_t *p_Ah_size, // size nvec
int64_t *p_nvec, // # of vectors, all nonempty
// input, not modified
const int64_t *Ap_old, // size nvec_old+1
const int64_t *Ah_old, // size nvec_old
const int64_t nvec_old, // original number of vectors
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_Ap != NULL) ;
ASSERT (p_Ah != NULL) ;
ASSERT (p_nvec != NULL) ;
ASSERT (Ap_old != NULL) ;
ASSERT (Ah_old != NULL) ;
ASSERT (nvec_old >= 0) ;
(*p_Ap) = NULL ; (*p_Ap_size) = 0 ;
(*p_Ah) = NULL ; (*p_Ah_size) = 0 ;
(*p_nvec) = -1 ;
int64_t *restrict W = NULL ; size_t W_size = 0 ;
int64_t *restrict Ap = NULL ; size_t Ap_size = 0 ;
int64_t *restrict Ah = NULL ; size_t Ah_size = 0 ;
//--------------------------------------------------------------------------
// determine the # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (nvec_old, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
W = GB_MALLOC_WORK (nvec_old+1, int64_t, &W_size) ;
if (W == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// count the # of nonempty vectors
//--------------------------------------------------------------------------
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvec_old ; k++)
{
// W [k] = 1 if the kth vector is nonempty; 0 if empty
W [k] = (Ap_old [k] < Ap_old [k+1]) ;
}
int64_t nvec ;
GB_cumsum (W, nvec_old, &nvec, nthreads, Context) ;
//--------------------------------------------------------------------------
// allocate the result
//--------------------------------------------------------------------------
Ap = GB_MALLOC (nvec+1, int64_t, &Ap_size) ;
Ah = GB_MALLOC (nvec , int64_t, &Ah_size) ;
if (Ap == NULL || Ah == NULL)
{
// out of memory
GB_FREE_WORK (&W, W_size) ;
GB_FREE (&Ap, Ap_size) ;
GB_FREE (&Ah, Ah_size) ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create the Ap and Ah result
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvec_old ; k++)
{
if (Ap_old [k] < Ap_old [k+1])
{
int64_t knew = W [k] ;
Ap [knew] = Ap_old [k] ;
Ah [knew] = Ah_old [k] ;
}
}
Ap [nvec] = Ap_old [nvec_old] ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK (&W, W_size) ;
(*p_Ap) = Ap ; (*p_Ap_size) = Ap_size ;
(*p_Ah) = Ah ; (*p_Ah_size) = Ah_size ;
(*p_nvec) = nvec ;
return (GrB_SUCCESS) ;
}
|
matrixadd_column.c | #include<stdio.h>
#include<stdlib.h>
#include "generic.h"
#define size 10000
#define NT 8
int A[size][size];
int B[size][size];
int C[size][size];
int main(int argc, char *argv[]){
if (argc < 2) {
fprintf(stderr, "usage: %s <integer-seed-value>\n", argv[0]);
return 1;
}
srand(atoi(argv[1])); // seed the PRNG from the command-line argument
// fill A and B with pseudo-random values
for(int i=0;i<size;i++){
for(int j=0;j<size;j++){
A[i][j]=rand()%1048576;
B[i][j]=rand()%1048576;
}
}
double t1=rtclock();
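/* The loop below uses the row index j as the inner subscript, so the
row-major arrays are traversed column by column with a stride of
`size` ints; this is the cache-unfriendly variant the file name
refers to. */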
#pragma omp parallel for num_threads(NT)
for(int i=0;i<size;i++)
for(int j=0;j<size;j++)
C[j][i]=A[j][i]+B[j][i];
double t2=rtclock();
printf("\nTIME =%f \n",(t2-t1)*1000);
}
/*Run executable-path <integer-seed-value>
*example: ./a.out 3 */
|
GB_unaryop__ainv_int16_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_bool
// op(A') function: GB_tran__ainv_int16_bool
// C type: int16_t
// A type: bool
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int16_bool
(
int16_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int16_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Fill.h | #ifndef DDM__ALGORITHM__FILL_H__
#define DDM__ALGORITHM__FILL_H__
#include "../../ddm/internal/Config.h"
#include "../../ddm/iterator/GlobIter.h"
#include "../../ddm/algorithm/LocalRange.h"
#include "../../ddm/algorithm/Operation.h"
#include "../../ddm/util/UnitLocality.h"
#include "../dart-impl/dart_communication.h"
#ifdef DDM_ENABLE_OPENMP
#include <omp.h>
#endif
namespace ddm {
/**
* Assigns the given value to the elements in the range [first, last)
*
* Being a collaborative operation, each unit will assign the value to
* its local elements only.
*
* \tparam ElementType Type of the elements in the sequence
* \complexity O(d) + O(nl), with \c d dimensions in the global iterators'
* pattern and \c nl local elements within the global range
*
* \ingroup DDMAlgorithms
*/
template <typename GlobIterType>
void fill(
/// Iterator to the initial position in the sequence
GlobIterType first,
/// Iterator to the final position in the sequence
GlobIterType last,
/// Value which will be assigned to the elements in range [first, last)
const typename GlobIterType::value_type & value)
{
typedef typename GlobIterType::index_type index_t;
typedef typename GlobIterType::value_type value_t;
// Global iterators to local range:
auto index_range = ddm::local_range(first, last);
value_t * lfirst = index_range.begin;
value_t * llast = index_range.end;
auto nlocal = llast - lfirst;
#if 0
for (index_t lt = 0; lt < nlocal; lt++) {
lfirst[lt] = value;
}
#else
#ifdef DDM_ENABLE_OPENMP
ddm::util::UnitLocality uloc;
auto n_threads = uloc.num_domain_threads();
DDM_LOG_DEBUG("ddm::fill", "thread capacity:", n_threads);
#pragma omp parallel for num_threads(n_threads)
for (index_t lt = 0; lt < nlocal; lt += 2) {
lfirst[lt] = value;
}
#pragma omp parallel for num_threads(n_threads)
for (index_t lt = 1; lt < nlocal; lt += 2) {
lfirst[lt] = value;
}
#else
for (index_t lt = 0; lt < nlocal; lt += 2) {
lfirst[lt] = value;
}
for (index_t lt = 1; lt < nlocal; lt += 2) {
lfirst[lt] = value;
}
#endif
#endif
}
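/* Usage sketch (illustrative; assumes a DDM global container such as
ddm::Array<int> providing begin()/end() global iterators):
ddm::Array<int> arr(1000);
ddm::fill(arr.begin(), arr.end(), 42); // each unit fills its local part
*/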
} // namespace ddm
#endif // DDM__ALGORITHM__FILL_H__
|
GB_binop__isge_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int32)
// A*D function (colscale): GB (_AxD__isge_int32)
// D*A function (rowscale): GB (_DxB__isge_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int32)
// C=scalar+B GB (_bind1st__isge_int32)
// C=scalar+B' GB (_bind1st_tran__isge_int32)
// C=A+scalar GB (_bind2nd__isge_int32)
// C=A'+scalar GB (_bind2nd_tran__isge_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
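// e.g. aij = 5, bij = 3 gives cij = 1 ; aij = 2, bij = 3 gives cij = 0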
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_INT32 || GxB_NO_ISGE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isge_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isge_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isge_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isge_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isge_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isge_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isge_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isge_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isge_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isge_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isge_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__isge_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test_example.c | #pragma omp greg
int omp_get_num_threads();
int omp_get_thread_num();
void genericForHeader();
void foo();
void bar();
int a, b, c;
int main(int argc, char** argv)
{
int d[10], e, f;
#pragma omp parallel for private(a, d) firstprivate(b, c) default(shared) ordered
for(int iterator=0; iterator<10; iterator++)
{
a=b=c=0;
}
bar();
#pragma omp parallel private(a, d) firstprivate(b, c) default(shared)
{
d[0] = e = f = 1;
}
#pragma omp critical
bar();
#pragma omp single nowait
bar();
#pragma omp master
bar();
#pragma omp critical
{
}
#pragma omp single
{
d[0] = e = f = 1;
}
#pragma omp master
{
d[0] = e = f = 1;
}
return 0;
}
|
GB_unaryop__lnot_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_int16
// op(A') function: GB_tran__lnot_uint16_int16
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint16_int16
(
uint16_t *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
testing_dpotri.c | /**
*
* @file testing_dpotri.c
*
* PLASMA testing routines
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Hatem Ltaief
* @date 2010-11-15
* @generated d Tue Jan 7 11:45:18 2014
*
**/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <plasma.h>
#include <cblas.h>
#include <lapacke.h>
#include <core_blas.h>
#include "testing_dmain.h"
enum blas_order_type {
blas_rowmajor = 101,
blas_colmajor = 102 };
enum blas_cmach_type {
blas_base = 151,
blas_t = 152,
blas_rnd = 153,
blas_ieee = 154,
blas_emin = 155,
blas_emax = 156,
blas_eps = 157,
blas_prec = 158,
blas_underflow = 159,
blas_overflow = 160,
blas_sfmin = 161};
enum blas_norm_type {
blas_one_norm = 171,
blas_real_one_norm = 172,
blas_two_norm = 173,
blas_frobenius_norm = 174,
blas_inf_norm = 175,
blas_real_inf_norm = 176,
blas_max_norm = 177,
blas_real_max_norm = 178 };
static void
BLAS_error(char *rname, int err, int val, int x) {
fprintf( stderr, "%s %d %d %d\n", rname, err, val, x );
abort();
}
static
void
BLAS_dge_norm(enum blas_order_type order, enum blas_norm_type norm,
int m, int n, const double *a, int lda, double *res) {
int i, j; double anorm, v; /* double accumulators, since the data is double */
char rname[] = "BLAS_dge_norm";
if (order != blas_colmajor) BLAS_error( rname, -1, order, 0 );
if (norm == blas_frobenius_norm) {
anorm = 0.0f;
for (j = n; j; --j) {
for (i = m; i; --i) {
v = a[0];
anorm += v * v;
a++;
}
a += lda - m;
}
anorm = sqrt( anorm );
} else if (norm == blas_inf_norm) {
anorm = 0.0f;
for (i = 0; i < m; ++i) {
v = 0.0f;
for (j = 0; j < n; ++j) {
v += fabs( a[i + j * lda] );
}
if (v > anorm)
anorm = v;
}
} else if (norm == blas_one_norm) {
/* one-norm: maximum absolute column sum */
anorm = 0.0f;
for (j = 0; j < n; ++j) {
v = 0.0f;
for (i = 0; i < m; ++i) {
v += fabs( a[i + j * lda] );
}
if (v > anorm)
anorm = v;
}
} else {
BLAS_error( rname, -2, norm, 0 );
return;
}
if (res) *res = anorm;
}
static
double
BLAS_dpow_di(double x, int n) {
double rv = 1.0;
if (n < 0) {
n = -n;
x = 1.0 / x;
}
for (; n; n >>= 1, x *= x) {
if (n & 1)
rv *= x;
}
return rv;
}
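/* An illustrative note: this is binary exponentiation; e.g. for n = 10
(binary 1010) the loop multiplies rv by x^2 and x^8, so
BLAS_dpow_di(2.0, 10) == 1024.0, and BLAS_dpow_di(2.0, -3) == 0.125. */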
static
double
BLAS_dfpinfo(enum blas_cmach_type cmach) {
double eps = 1.0, r = 1.0, o = 1.0, b = 2.0;
int t = 53, l = 1024, m = -1021;
char rname[] = "BLAS_dfpinfo";
if ((sizeof eps) == sizeof(float)) {
t = 24;
l = 128;
m = -125;
} else {
t = 53;
l = 1024;
m = -1021;
}
/* for (i = 0; i < t; ++i) eps *= half; */
eps = BLAS_dpow_di( b, -t );
/* for (i = 0; i >= m; --i) r *= half; */
r = BLAS_dpow_di( b, m-1 );
o -= eps;
/* for (i = 0; i < l; ++i) o *= b; */
o = (o * BLAS_dpow_di( b, l-1 )) * b;
switch (cmach) {
case blas_eps: return eps;
case blas_sfmin: return r;
default:
BLAS_error( rname, -1, cmach, 0 );
break;
}
return 0.0;
}
static
void
SPD_GEN(int N, double *A, int LDA)
{
srand48(time(NULL));
int i,j;
for(i=0; i<N*LDA; ++i)
A[i] = drand48();
for(j=0; j<N; ++j) {
for(i=0; i<N; ++i) {
A[j*LDA+i] = A[i*LDA+j] = drand48();
if (i==j)
A[j*LDA+i] *= LDA*N;
}
}
// for(i=0; i<LDA; ++i){
// for(j=0; j<N; ++j){
// printf("%e ", A[j*LDA+i]);
// }
// printf("\n");
// }
}
static int check_factorization(int, double*, double*, int, int, double);
static int check_inverse(int, double *, double *, int, int, double);
int testing_dpotri(int argc, char **argv)
{
/* Check the number of arguments */
if (argc != 2){
USAGE("POTRI", "N LDA",
" - N : the size of the matrix\n"
" - LDA : leading dimension of the matrix A\n");
return -1;
}
int N = atoi(argv[0]);
int LDA = atoi(argv[1]);
double eps;
int uplo;
int info_inverse, info_factorization;
double *A1 = (double *)malloc(LDA*N*sizeof(double));
#pragma omp register([LDA*N]A1)
double *A2 = (double *)malloc(LDA*N*sizeof(double));
#pragma omp register([LDA*N]A2)
double *WORK = (double *)malloc(2*LDA*sizeof(double));
#pragma omp register([2*LDA]WORK)
/* Check that memory allocation succeeded */
if ((!A1)||(!A2)||(!WORK)){
printf("Out of memory\n");
return -2;
}
eps = BLAS_dfpinfo( blas_eps );
uplo = PlasmaUpper;
/*-------------------------------------------------------------
* TESTING DPOTRI
*/
/* Initialize A1 and A2 as a symmetric positive definite matrix */
SPD_GEN(N, A1, LDA);
// PLASMA_dplgsy( (double)N, N, A1, LDA, 51 );
PLASMA_dlacpy( PlasmaUpperLower, N, N, A1, LDA, A2, LDA );
printf("\n");
printf("------ TESTS FOR PLASMA DPOTRI ROUTINE ------- \n");
printf(" Size of the Matrix %d by %d\n", N, N);
printf("\n");
printf(" The matrix A is randomly generated for each test.\n");
printf("============\n");
printf(" The relative machine precision (eps) is to be %e \n", eps);
printf(" Computational tests pass if scaled residuals are less than 60.\n");
/* PLASMA DPOTRF */
PLASMA_dpotrf(uplo, N, A2, LDA);
/* Check the factorization */
info_factorization = check_factorization( N, A1, A2, LDA, uplo, eps);
/* PLASMA DPOTRI */
PLASMA_dpotri(uplo, N, A2, LDA);
/* Check the inverse */
info_inverse = check_inverse(N, A1, A2, LDA, uplo, eps);
if ( (info_inverse == 0) && (info_factorization == 0) ) {
printf("***************************************************\n");
printf(" ---- TESTING DPOTRI ..................... PASSED !\n");
printf("***************************************************\n");
}
else {
printf("***************************************************\n");
printf(" - TESTING DPOTRI ... FAILED !\n");
printf("***************************************************\n");
}
free(A1); free(A2); free(WORK);
return 0;
}
/*------------------------------------------------------------------------
* Check the factorization of the matrix A2
*/
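/* A sketch of the check (inferred from the code below): with A1 the original
matrix and A2 holding its Cholesky factor, the routine forms the residual
R = U'U - A1 (or L L' - A1 in the lower case) and accepts the factorization
when ||R||_oo / (||A1||_oo * N * eps) < 60. */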
static int check_factorization(int N, double *A1, double *A2, int LDA, int uplo, double eps)
{
double Anorm, Rnorm;
double alpha;
int info_factorization;
int i,j;
double *Residual = (double *)malloc(N*N*sizeof(double));
double *L1 = (double *)malloc(N*N*sizeof(double));
double *L2 = (double *)malloc(N*N*sizeof(double));
double *work = (double *)malloc(N*sizeof(double));
memset((void*)L1, 0, N*N*sizeof(double));
memset((void*)L2, 0, N*N*sizeof(double));
alpha= 1.0;
LAPACKE_dlacpy_work(LAPACK_COL_MAJOR,' ', N, N, A1, LDA, Residual, N);
/* Dealing with L'L or U'U */
if (uplo == PlasmaUpper){
LAPACKE_dlacpy_work(LAPACK_COL_MAJOR,'u', N, N, A2, LDA, L1, N);
LAPACKE_dlacpy_work(LAPACK_COL_MAJOR,'u', N, N, A2, LDA, L2, N);
cblas_dtrmm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, N, N, (alpha), L1, N, L2, N);
}
else{
LAPACKE_dlacpy_work(LAPACK_COL_MAJOR,'l', N, N, A2, LDA, L1, N);
LAPACKE_dlacpy_work(LAPACK_COL_MAJOR,'l', N, N, A2, LDA, L2, N);
cblas_dtrmm(CblasColMajor, CblasRight, CblasLower, CblasTrans, CblasNonUnit, N, N, (alpha), L1, N, L2, N);
}
/* Compute the Residual || A -L'L|| */
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
Residual[j*N+i] = L2[j*N+i] - Residual[j*N+i];
BLAS_dge_norm( blas_colmajor, blas_inf_norm, N, N, Residual, N, &Rnorm );
BLAS_dge_norm( blas_colmajor, blas_inf_norm, N, N, A1, LDA, &Anorm );
printf("============\n");
printf("Checking the Cholesky Factorization \n");
printf("-- ||L'L-A||_oo/(||A||_oo.N.eps) = %e \n",Rnorm/(Anorm*N*eps));
if ( isnan(Rnorm/(Anorm*N*eps)) || isinf(Rnorm/(Anorm*N*eps)) || (Rnorm/(Anorm*N*eps) > 60.0) ){
printf("-- Factorization is suspicious ! \n");
info_factorization = 1;
}
else{
printf("-- Factorization is CORRECT ! \n");
info_factorization = 0;
}
free(Residual); free(L1); free(L2); free(work);
return info_factorization;
}
/*------------------------------------------------------------------------
* Check the accuracy of the computed inverse
*/
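/* A sketch of the check: with Ainv the computed inverse in A2, the routine
forms work = Id - Ainv*A via dsymm plus the identity, and accepts when
||Id - A*Ainv||_1 / ((||A||_1 ||Ainv||_1) * N * eps) < 60. */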
static int check_inverse(int N, double *A1, double *A2, int LDA, int uplo, double eps )
{
int info_inverse;
int i, j;
double Rnorm, Anorm, Ainvnorm, result;
double alpha, beta, zone;
double *work = (double *)malloc(N*N*sizeof(double));
alpha = -1.0;
beta = 0.0;
zone = 1.0;
/* Rebuild the other part of the inverse matrix */
if(uplo == PlasmaUpper){
for(j=0; j<N; j++)
for(i=0; i<j; i++)
*(A2+j+i*LDA) = *(A2+i+j*LDA);
cblas_dsymm(CblasColMajor, CblasLeft, CblasUpper, N, N, (alpha), A2, LDA, A1, LDA, (beta), work, N);
}
else {
for(j=0; j<N; j++)
for(i=j; i<N; i++)
*(A2+j+i*LDA) = *(A2+i+j*LDA);
cblas_dsymm(CblasColMajor, CblasLeft, CblasLower, N, N, (alpha), A2, LDA, A1, LDA, (beta), work, N);
}
/* Add the identity matrix to work */
for(i=0; i<N; i++)
*(work+i+i*N) = *(work+i+i*N) + zone;
BLAS_dge_norm( blas_colmajor, blas_one_norm, N, N, work, N, &Rnorm );
BLAS_dge_norm( blas_colmajor, blas_one_norm, N, N, A1, LDA, &Anorm );
BLAS_dge_norm( blas_colmajor, blas_one_norm, N, N, A2, LDA, &Ainvnorm );
if (getenv("PLASMA_TESTING_VERBOSE"))
printf( "||A||_1=%f\n||Ainv||_1=%f\n||Id - A*Ainv||_1=%e\n", Anorm, Ainvnorm, Rnorm );
result = Rnorm / ( (Anorm*Ainvnorm)*N*eps ) ;
printf("============\n");
printf("Checking the Residual of the inverse \n");
printf("-- ||Id - A*Ainv||_1/((||A||_1||Ainv||_1).N.eps) = %e \n", result);
if ( isnan(Ainvnorm) || isinf(Ainvnorm) || isnan(result) || isinf(result) || (result > 60.0) ) {
printf("-- The inverse is suspicious ! \n");
info_inverse = 1;
}
else{
printf("-- The inverse is CORRECT ! \n");
info_inverse = 0;
}
free(work);
return info_inverse;
}
|
GB_binop__le_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__le_uint64
// A.*B function (eWiseMult): GB_AemultB__le_uint64
// A*D function (colscale): GB_AxD__le_uint64
// D*A function (rowscale): GB_DxB__le_uint64
// C+=B function (dense accum): GB_Cdense_accumB__le_uint64
// C+=b function (dense accum): GB_Cdense_accumb__le_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__le_uint64
// C=scalar+B GB_bind1st__le_uint64
// C=scalar+B' GB_bind1st_tran__le_uint64
// C=A+scalar GB_bind2nd__le_uint64
// C=A'+scalar GB_bind2nd_tran__le_uint64
// C type: bool
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_UINT64 || GxB_NO_LE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__le_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__le_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__le_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__le_uint64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__le_uint64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__le_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__le_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__le_uint64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t bij = Bx [p] ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__le_uint64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
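// For example (an illustrative sketch, not generated output): with the bound
// scalar y = 10, an input Ax = {3, 10, 42} yields Cx = {1, 1, 0}, since each
// boolean entry is computed as cij = (aij <= y).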
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB_bind1st_tran__le_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB_bind2nd_tran__le_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
parallel_transform.h | #include <algorithm>
#include <thread>
#include <vector>
namespace Rcpp{
namespace parallel{
#if defined(RCPP11_EXPERIMENTAL_PARALLEL)
template <typename InputIterator, typename OutputIterator, typename Function>
void transform( int nthreads, InputIterator begin, InputIterator end, OutputIterator target, Function fun ){
std::vector<std::thread> workers(nthreads-1) ;
R_xlen_t chunk_size = std::distance(begin, end) / nthreads ;
R_xlen_t start=0;
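// dispatch the first nthreads-1 chunks to worker threads; the main thread
// then handles the final chunk, plus any division remainder, in the
// std::transform call below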
for( int i=0; i<nthreads-1; i++, start+=chunk_size){
workers[i] = std::thread( std::transform<InputIterator, OutputIterator, Function>,
begin + start, begin + start + chunk_size,
target + start,
fun) ;
}
std::transform( begin + start, end, target + start, fun ) ;
for( int i=0; i<nthreads-1; i++) workers[i].join() ;
}
#else
template <typename InputIterator, typename OutputIterator, typename Function>
inline void transform( int, InputIterator begin, InputIterator end, OutputIterator target, Function fun ){
std::transform( begin, end, target, fun ) ;
}
#endif
}
}
template <class T,
class InputIterator,
class MapFunction,
class ReductionFunction>
T MapReduce_n(InputIterator in,
unsigned int size,
T baseval,
MapFunction mapper,
ReductionFunction reducer)
{
T val = baseval;
#pragma omp parallel
{
T map_val = baseval;
#pragma omp for nowait
for (auto i = 0U; i < size; ++i)
{
map_val = reducer(map_val, mapper(*(in + i)));
}
#pragma omp critical
val = reducer(val, map_val);
}
return val;
}
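// Hypothetical usage sketch (not part of this header): summing squares with
// MapReduce_n. Note that baseval is folded in once per thread, so it should
// be an identity element of the reducer (0 for addition) for an exact result.
#include <vector>
#include <cstdio>
int main() {
std::vector<double> v(1000, 2.0);
double ssq = MapReduce_n(v.begin(), (unsigned)v.size(), 0.0,
[](double x) { return x * x; }, // mapper: square each element
[](double a, double b) { return a + b; }); // reducer: sum
std::printf("sum of squares = %f\n", ssq); // prints 4000.000000
return 0;
}
 |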
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <ucs/sys/string.h>
#include <string.h>
#include <tools/perf/lib/libperf_int.h>
#include <unistd.h>
#if _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \
_status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \
if (_status != UCS_OK) { \
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \
"message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \
(_msg)[_op], (_size)); \
return _status; \
}
#define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \
if (!ucs_test_all_flags(_attr, _required)) { \
if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \
#_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \
(_msg)[ucs_ffs64(~(_attr) & (_required))]); \
} \
return UCS_ERR_UNSUPPORTED; \
}
typedef struct {
union {
struct {
size_t dev_addr_len;
size_t iface_addr_len;
size_t ep_addr_len;
} uct;
struct {
size_t worker_addr_len;
size_t total_wireup_len;
} ucp;
};
size_t rkey_size;
unsigned long recv_buffer;
} ucx_perf_ep_info_t;
const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST];
static const char *perf_iface_ops[] = {
[ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short",
[ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short",
[ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep",
[ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability",
[ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback",
[ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback",
[ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy"
};
static const char *perf_atomic_op[] = {
[UCT_ATOMIC_OP_ADD] = "add",
[UCT_ATOMIC_OP_AND] = "and",
[UCT_ATOMIC_OP_OR] = "or" ,
[UCT_ATOMIC_OP_XOR] = "xor"
};
static const char *perf_atomic_fop[] = {
[UCT_ATOMIC_OP_ADD] = "fetch-add",
[UCT_ATOMIC_OP_AND] = "fetch-and",
[UCT_ATOMIC_OP_OR] = "fetch-or",
[UCT_ATOMIC_OP_XOR] = "fetch-xor",
[UCT_ATOMIC_OP_SWAP] = "swap",
[UCT_ATOMIC_OP_CSWAP] = "cswap"
};
/*
* This Quickselect routine is based on the algorithm described in
* "Numerical recipes in C", Second Edition,
* Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
* This code by Nicolas Devillard - 1998. Public domain.
*/
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
int low, high ;
int median;
int middle, ll, hh;
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }
low = 0 ; high = n-1 ; median = (low + high) / 2;
for (;;) {
if (high <= low) /* One element only */
return arr[median] ;
if (high == low + 1) { /* Two elements only */
if (arr[low] > arr[high])
ELEM_SWAP(arr[low], arr[high]) ;
return arr[median] ;
}
/* Find median of low, middle and high items; swap into position low */
middle = (low + high) / 2;
if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ;
if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ;
if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ;
/* Swap low item (now in position middle) into position (low+1) */
ELEM_SWAP(arr[middle], arr[low+1]) ;
/* Nibble from each end towards middle, swapping items when stuck */
ll = low + 1;
hh = high;
for (;;) {
do ll++; while (arr[low] > arr[ll]) ;
do hh--; while (arr[hh] > arr[low]) ;
if (hh < ll)
break;
ELEM_SWAP(arr[ll], arr[hh]) ;
}
/* Swap middle item (in position low) back into correct position */
ELEM_SWAP(arr[low], arr[hh]) ;
/* Re-set active partition */
if (hh <= median)
low = ll;
if (hh >= median)
high = hh - 1;
}
}
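/* An illustrative note (not in the original source): the routine partially
reorders arr[] in place and returns the element that would land at index
(n-1)/2 in sorted order; e.g. arr = {5, 1, 4, 2, 3} with n = 5 returns 3. */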
static ucs_status_t
uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
unsigned flags, uct_allocated_memory_t *alloc_mem)
{
ucs_status_t status;
status = uct_iface_mem_alloc(perf->uct.iface, length,
flags, "perftest", alloc_mem);
if (status != UCS_OK) {
ucs_error("failed to allocate memory: %s", ucs_status_string(status));
return status;
}
ucs_assert(alloc_mem->md == perf->uct.md);
return UCS_OK;
}
static void uct_perf_test_free_host(const ucx_perf_context_t *perf,
uct_allocated_memory_t *alloc_mem)
{
uct_iface_mem_free(alloc_mem);
}
static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type,
const void *src, ucs_memory_type_t src_mem_type,
size_t count)
{
if ((dst_mem_type != UCS_MEMORY_TYPE_HOST) ||
(src_mem_type != UCS_MEMORY_TYPE_HOST)) {
ucs_error("wrong memory type passed src - %d, dst - %d",
src_mem_type, dst_mem_type);
} else {
memcpy(dst, src, count);
}
}
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
ucs_status_t status;
unsigned flags;
size_t buffer_size;
if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
buffer_size = params->msg_size_cnt * params->iov_stride;
} else {
buffer_size = ucx_perf_get_message_size(params);
}
/* TODO use params->alignment */
flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
UCT_MD_MEM_FLAG_NONBLOCK : 0;
flags |= UCT_MD_MEM_ACCESS_ALL;
/* Allocate send buffer memory */
status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
flags, &perf->uct.send_mem);
if (status != UCS_OK) {
goto err;
}
perf->send_buffer = perf->uct.send_mem.address;
/* Allocate receive buffer memory */
status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
flags, &perf->uct.recv_mem);
if (status != UCS_OK) {
goto err_free_send;
}
perf->recv_buffer = perf->uct.recv_mem.address;
/* Allocate IOV datatype memory */
perf->params.msg_size_cnt = params->msg_size_cnt;
perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
perf->params.msg_size_cnt *
params->thread_count);
if (NULL == perf->uct.iov) {
status = UCS_ERR_NO_MEMORY;
ucs_error("Failed allocate send IOV(%lu) buffer: %s",
perf->params.msg_size_cnt, ucs_status_string(status));
goto err_free_recv;
}
ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
perf->send_buffer, perf->recv_buffer);
return UCS_OK;
err_free_recv:
perf->allocator->uct_free(perf, &perf->uct.recv_mem);
err_free_send:
perf->allocator->uct_free(perf, &perf->uct.send_mem);
err:
return status;
}
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
perf->allocator->uct_free(perf, &perf->uct.send_mem);
perf->allocator->uct_free(perf, &perf->uct.recv_mem);
free(perf->uct.iov);
}
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
ucs_time_t start_time = ucs_get_time();
perf->start_time_acc = ucs_get_accurate_time();
perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX :
ucs_time_from_sec(perf->params.max_time) + start_time;
perf->prev_time = start_time;
perf->prev.time = start_time;
perf->prev.time_acc = perf->start_time_acc;
perf->current.time_acc = perf->start_time_acc;
}
/* Initialize/reset all parameters that could be modified by the warm-up run */
static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
const ucx_perf_params_t *params)
{
unsigned i;
perf->max_iter = (perf->params.max_iter == 0) ? UINT64_MAX :
perf->params.max_iter;
perf->report_interval = ucs_time_from_sec(perf->params.report_interval);
perf->current.time = 0;
perf->current.msgs = 0;
perf->current.bytes = 0;
perf->current.iters = 0;
perf->prev.msgs = 0;
perf->prev.bytes = 0;
perf->prev.iters = 0;
perf->timing_queue_head = 0;
for (i = 0; i < TIMING_QUEUE_SIZE; ++i) {
perf->timing_queue[i] = 0;
}
ucx_perf_test_start_clock(perf);
}
static void ucx_perf_test_init(ucx_perf_context_t *perf,
const ucx_perf_params_t *params)
{
unsigned group_index;
perf->params = *params;
group_index = rte_call(perf, group_index);
if (0 == group_index) {
perf->allocator = ucx_perf_mem_type_allocators[params->send_mem_type];
} else {
perf->allocator = ucx_perf_mem_type_allocators[params->recv_mem_type];
}
ucx_perf_test_prepare_new_run(perf, params);
}
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
ucs_time_t median;
double factor;
if (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) {
factor = 2.0;
} else {
factor = 1.0;
}
result->iters = perf->current.iters;
result->bytes = perf->current.bytes;
result->elapsed_time = perf->current.time_acc - perf->start_time_acc;
/* Latency */
median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE);
result->latency.typical = ucs_time_to_sec(median) / factor;
result->latency.moment_average =
(perf->current.time_acc - perf->prev.time_acc)
/ (perf->current.iters - perf->prev.iters)
/ factor;
result->latency.total_average =
(perf->current.time_acc - perf->start_time_acc)
/ perf->current.iters
/ factor;
/* Bandwidth */
result->bandwidth.typical = 0.0; // Undefined
result->bandwidth.moment_average =
(perf->current.bytes - perf->prev.bytes) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->bandwidth.total_average =
perf->current.bytes /
(perf->current.time_acc - perf->start_time_acc) * factor;
/* Packet rate */
result->msgrate.typical = 0.0; // Undefined
result->msgrate.moment_average =
(perf->current.msgs - perf->prev.msgs) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->msgrate.total_average =
perf->current.msgs /
(perf->current.time_acc - perf->start_time_acc) * factor;
}
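/* A worked example (illustrative only): in a ping-pong test (factor = 2.0),
1000 iterations completed in 0.002 sec give a total-average latency of
0.002 / 1000 / 2 = 1e-6 sec, i.e. half of the average round-trip time. */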
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
size_t it;
/* check if zero-size messages are requested and supported */
if ((/* they are not supported by: */
/* - UCT tests, except UCT AM Short/Bcopy */
(params->api == UCX_PERF_API_UCT) ||
(/* - UCP RMA and AMO tests */
(params->api == UCX_PERF_API_UCP) &&
(params->command != UCX_PERF_CMD_AM) &&
(params->command != UCX_PERF_CMD_TAG) &&
(params->command != UCX_PERF_CMD_TAG_SYNC) &&
(params->command != UCX_PERF_CMD_STREAM))) &&
ucx_perf_get_message_size(params) < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size too small, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->api == UCX_PERF_API_UCP) &&
((params->send_mem_type != UCS_MEMORY_TYPE_HOST) ||
(params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) &&
((params->command == UCX_PERF_CMD_PUT) ||
(params->command == UCX_PERF_CMD_GET) ||
(params->command == UCX_PERF_CMD_ADD) ||
(params->command == UCX_PERF_CMD_FADD) ||
(params->command == UCX_PERF_CMD_SWAP) ||
(params->command == UCX_PERF_CMD_CSWAP))) {
/* TODO: remove when support for non-HOST memory types is added */
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types",
ucs_memory_type_names[params->send_mem_type],
ucs_memory_type_names[params->recv_mem_type]);
}
return UCS_ERR_INVALID_PARAM;
}
if (params->max_outstanding < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("max_outstanding, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
/* check that each message size fits into the stride size */
if (params->iov_stride) {
for (it = 0; it < params->msg_size_cnt; ++it) {
if (params->msg_size_list[it] > params->iov_stride) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Buffer size %lu bigger than stride %lu",
params->msg_size_list[it], params->iov_stride);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
return UCS_OK;
}
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index)
{
uct_ep_h ep = perf->uct.peers[peer_index].ep;
uct_completion_t comp;
ucs_status_t status;
int started;
started = 0;
comp.func = NULL;
comp.count = 2;
do {
if (!started) {
status = uct_ep_flush(ep, 0, &comp);
if (status == UCS_OK) {
--comp.count;
} else if (status == UCS_INPROGRESS) {
started = 1;
} else if (status != UCS_ERR_NO_RESOURCE) {
ucs_error("uct_ep_flush() failed: %s", ucs_status_string(status));
return;
}
}
uct_worker_progress(perf->uct.worker);
} while (comp.count > 1);
}
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
ucs_status_t status;
do {
status = uct_iface_flush(perf->uct.iface, 0, NULL);
uct_worker_progress(perf->uct.worker);
} while (status == UCS_INPROGRESS);
if (status != UCS_OK) {
ucs_error("uct_iface_flush() failed: %s", ucs_status_string(status));
}
}
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
uint64_t bcopy_f, uint64_t zcopy_f)
{
return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_f :
(layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_f :
(layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_f :
0;
}
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
uint64_t *op64, uint64_t op)
{
if (size == sizeof(uint32_t)) {
*op32 = UCS_BIT(op);
return UCS_OK;
} else if (size == sizeof(uint64_t)) {
*op64 = UCS_BIT(op);
return UCS_OK;
}
return UCS_ERR_UNSUPPORTED;
}
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
size_t bcopy_m, uint64_t zcopy_m)
{
return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
(layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
(layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
0;
}
static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params,
ucs_memory_type_t mem_type,
uct_md_attr_t *md_attr)
{
if (!(md_attr->cap.access_mem_types & UCS_BIT(mem_type)) &&
!(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT,
ucs_memory_type_names[mem_type],
UCT_PERF_TEST_PARAMS_ARG(params));
return UCS_ERR_INVALID_PARAM;
}
}
return UCS_OK;
}
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
uct_iface_h iface, uct_md_h md)
{
uint64_t required_flags = 0;
uint64_t atomic_op32 = 0;
uint64_t atomic_op64 = 0;
uint64_t atomic_fop32 = 0;
uint64_t atomic_fop64 = 0;
uct_md_attr_t md_attr;
uct_iface_attr_t attr;
ucs_status_t status;
size_t min_size, max_size, max_iov, message_size;
status = uct_md_query(md, &md_attr);
if (status != UCS_OK) {
ucs_error("uct_md_query(%s) failed: %s",
params->uct.md_name, ucs_status_string(status));
return status;
}
status = uct_iface_query(iface, &attr);
if (status != UCS_OK) {
ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s",
UCT_PERF_TEST_PARAMS_ARG(params),
ucs_status_string(status));
return status;
}
min_size = 0;
max_iov = 1;
message_size = ucx_perf_get_message_size(params);
switch (params->command) {
case UCX_PERF_CMD_AM:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
required_flags |= UCT_IFACE_FLAG_CB_SYNC;
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.am.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
max_iov = attr.cap.am.max_iov;
break;
case UCX_PERF_CMD_PUT:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.put.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
max_iov = attr.cap.put.max_iov;
break;
case UCX_PERF_CMD_GET:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT,
UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.get.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short,
attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
max_iov = attr.cap.get.max_iov;
break;
case UCX_PERF_CMD_ADD:
ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD,
perf_atomic_op, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_FADD:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_SWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_CSWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
default:
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Invalid test command");
}
return UCS_ERR_INVALID_PARAM;
}
status = ucx_perf_test_check_params(params);
if (status != UCS_OK) {
return status;
}
/* check atomics first */
ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op);
ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop);
/* check iface flags */
if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) &&
(!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s",
UCT_PERF_TEST_PARAMS_ARG(params),
perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]);
}
return UCS_ERR_UNSUPPORTED;
}
if (message_size < min_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is smaller than min supported (%zu)",
message_size, min_size);
}
return UCS_ERR_UNSUPPORTED;
}
if (message_size > max_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is larger than max supported (%zu)",
message_size, max_size);
}
return UCS_ERR_UNSUPPORTED;
}
if (params->command == UCX_PERF_CMD_AM) {
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
(params->am_hdr_size != sizeof(uint64_t)))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Short AM header size must be 8 bytes");
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
(params->am_hdr_size > attr.cap.am.max_hdr))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than max supported (%zu)",
params->am_hdr_size, attr.cap.am.max_hdr);
}
return UCS_ERR_UNSUPPORTED;
}
if (params->am_hdr_size > message_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than message size (%zu)",
params->am_hdr_size, message_size);
}
return UCS_ERR_INVALID_PARAM;
}
if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM flow-control window (%d) too large (should be <= %d)",
params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW);
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
(params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
{
ucs_warn("Running active-message test with on-sided progress");
}
}
if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
if (params->msg_size_cnt > max_iov) {
if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
!params->msg_size_cnt) {
ucs_error("Wrong number of IOV entries. Requested is %lu, "
"should be in the range 1...%lu", params->msg_size_cnt,
max_iov);
}
return UCS_ERR_UNSUPPORTED;
}
/* if msg_size_cnt == 1, the message size was already checked above */
if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
if (params->am_hdr_size > params->msg_size_list[0]) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%lu) larger than the first IOV "
"message size (%lu)", params->am_hdr_size,
params->msg_size_list[0]);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr);
if (status != UCS_OK) {
return status;
}
status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr);
if (status != UCS_OK) {
return status;
}
return UCS_OK;
}
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
const size_t buffer_size = ADDR_BUF_SIZE;
ucx_perf_ep_info_t info, *remote_info;
unsigned group_size, i, group_index;
uct_device_addr_t *dev_addr;
uct_iface_addr_t *iface_addr;
uct_ep_addr_t *ep_addr;
uct_iface_attr_t iface_attr;
uct_md_attr_t md_attr;
uct_ep_params_t ep_params;
void *rkey_buffer;
ucs_status_t status;
struct iovec vec[5];
void *buffer;
void *req;
buffer = malloc(buffer_size);
if (buffer == NULL) {
ucs_error("Failed to allocate RTE buffer");
status = UCS_ERR_NO_MEMORY;
goto err;
}
status = uct_iface_query(perf->uct.iface, &iface_attr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
goto err_free;
}
status = uct_md_query(perf->uct.md, &md_attr);
if (status != UCS_OK) {
ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
goto err_free;
}
if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
info.rkey_size = md_attr.rkey_packed_size;
} else {
info.rkey_size = 0;
}
info.uct.dev_addr_len = iface_attr.device_addr_len;
info.uct.iface_addr_len = iface_attr.iface_addr_len;
info.uct.ep_addr_len = iface_attr.ep_addr_len;
info.recv_buffer = (uintptr_t)perf->recv_buffer;
rkey_buffer = buffer;
dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size);
iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len);
ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len);
ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <=
UCS_PTR_BYTE_OFFSET(buffer, buffer_size));
status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_get_device_address: %s",
ucs_status_string(status));
goto err_free;
}
status = uct_iface_get_address(perf->uct.iface, iface_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
goto err_free;
}
if (info.rkey_size > 0) {
memset(rkey_buffer, 0, info.rkey_size);
status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
if (status != UCS_OK) {
ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
goto err_free;
}
}
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
if (perf->uct.peers == NULL) {
status = UCS_ERR_NO_MEMORY;
goto err_free;
}
ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE;
ep_params.iface = perf->uct.iface;
if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
for (i = 0; i < group_size; ++i) {
if (i == group_index) {
continue;
}
status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
if (status != UCS_OK) {
ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
goto err_destroy_eps;
}
status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
if (status != UCS_OK) {
ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
goto err_destroy_eps;
}
}
} else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR |
UCT_EP_PARAM_FIELD_IFACE_ADDR;
}
vec[0].iov_base = &info;
vec[0].iov_len = sizeof(info);
vec[1].iov_base = buffer;
vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len +
info.uct.iface_addr_len + info.uct.ep_addr_len;
rte_call(perf, post_vec, vec, 2, &req);
rte_call(perf, exchange_vec, req);
for (i = 0; i < group_size; ++i) {
if (i == group_index) {
continue;
}
rte_call(perf, recv, i, buffer, buffer_size, req);
remote_info = buffer;
rkey_buffer = remote_info + 1;
dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size);
iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len);
ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len);
perf->uct.peers[i].remote_addr = remote_info->recv_buffer;
if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
remote_info->uct.iface_addr_len ?
iface_addr : NULL)) {
ucs_error("Destination is unreachable");
status = UCS_ERR_UNREACHABLE;
goto err_destroy_eps;
}
if (remote_info->rkey_size > 0) {
status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer,
&perf->uct.peers[i].rkey);
if (status != UCS_OK) {
ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
goto err_destroy_eps;
}
} else {
perf->uct.peers[i].rkey.handle = NULL;
perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY;
}
if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
} else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
ep_params.dev_addr = dev_addr;
ep_params.iface_addr = iface_addr;
status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
} else {
status = UCS_ERR_UNSUPPORTED;
}
if (status != UCS_OK) {
ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
goto err_destroy_eps;
}
}
uct_perf_iface_flush_b(perf);
free(buffer);
uct_perf_barrier(perf);
return UCS_OK;
err_destroy_eps:
for (i = 0; i < group_size; ++i) {
if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
}
if (perf->uct.peers[i].ep != NULL) {
uct_ep_destroy(perf->uct.peers[i].ep);
}
}
free(perf->uct.peers);
err_free:
free(buffer);
err:
return status;
}
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
unsigned group_size, group_index, i;
uct_perf_barrier(perf);
uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0);
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
for (i = 0; i < group_size; ++i) {
if (i != group_index) {
if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
}
if (perf->uct.peers[i].ep) {
uct_ep_destroy(perf->uct.peers[i].ep);
}
}
}
free(perf->uct.peers);
}
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
ucp_params_t *ucp_params)
{
ucs_status_t status;
size_t message_size;
message_size = ucx_perf_get_message_size(params);
switch (params->command) {
case UCX_PERF_CMD_PUT:
case UCX_PERF_CMD_GET:
ucp_params->features |= UCP_FEATURE_RMA;
break;
case UCX_PERF_CMD_ADD:
case UCX_PERF_CMD_FADD:
case UCX_PERF_CMD_SWAP:
case UCX_PERF_CMD_CSWAP:
if (message_size == sizeof(uint32_t)) {
ucp_params->features |= UCP_FEATURE_AMO32;
} else if (message_size == sizeof(uint64_t)) {
ucp_params->features |= UCP_FEATURE_AMO64;
} else {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Atomic size should be either 32 or 64 bit");
}
return UCS_ERR_INVALID_PARAM;
}
break;
case UCX_PERF_CMD_TAG:
case UCX_PERF_CMD_TAG_SYNC:
ucp_params->features |= UCP_FEATURE_TAG;
break;
case UCX_PERF_CMD_STREAM:
ucp_params->features |= UCP_FEATURE_STREAM;
break;
default:
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Invalid test command");
}
return UCS_ERR_INVALID_PARAM;
}
if (params->flags & UCX_PERF_TEST_FLAG_WAKEUP) {
ucp_params->features |= UCP_FEATURE_WAKEUP;
}
status = ucx_perf_test_check_params(params);
if (status != UCS_OK) {
return status;
}
return UCS_OK;
}
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
size_t iovcnt, unsigned thread_count,
ucp_dt_iov_t **iov_p)
{
ucp_dt_iov_t *iov;
if (UCP_PERF_DATATYPE_IOV == datatype) {
iov = malloc(sizeof(*iov) * iovcnt * thread_count);
if (NULL == iov) {
ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
return UCS_ERR_NO_MEMORY;
}
*iov_p = iov;
}
return UCS_OK;
}
static ucs_status_t
ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
void **address_p, ucp_mem_h *memh, int non_blk_flag)
{
ucp_mem_map_params_t mem_map_params;
ucp_mem_attr_t mem_attr;
ucs_status_t status;
mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
UCP_MEM_MAP_PARAM_FIELD_LENGTH |
UCP_MEM_MAP_PARAM_FIELD_FLAGS;
mem_map_params.address = *address_p;
mem_map_params.length = length;
mem_map_params.flags = UCP_MEM_MAP_ALLOCATE;
if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) {
mem_map_params.flags |= non_blk_flag;
}
status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh);
if (status != UCS_OK) {
goto err;
}
mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
status = ucp_mem_query(*memh, &mem_attr);
if (status != UCS_OK) {
goto err;
}
*address_p = mem_attr.address;
return UCS_OK;
err:
return status;
}
static void ucp_perf_test_free_host(const ucx_perf_context_t *perf,
void *address, ucp_mem_h memh)
{
ucs_status_t status;
status = ucp_mem_unmap(perf->ucp.context, memh);
if (status != UCS_OK) {
ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status));
}
}
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
ucs_status_t status;
size_t buffer_size;
if (params->iov_stride) {
buffer_size = params->msg_size_cnt * params->iov_stride;
} else {
buffer_size = ucx_perf_get_message_size(params);
}
/* Allocate send buffer memory */
perf->send_buffer = NULL;
status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
&perf->send_buffer, &perf->ucp.send_memh,
UCP_MEM_MAP_NONBLOCK);
if (status != UCS_OK) {
goto err;
}
/* Allocate receive buffer memory */
perf->recv_buffer = NULL;
status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
&perf->recv_buffer, &perf->ucp.recv_memh,
0);
if (status != UCS_OK) {
goto err_free_send_buffer;
}
/* Allocate IOV datatype memory */
perf->ucp.send_iov = NULL;
status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype,
perf->params.msg_size_cnt,
params->thread_count,
&perf->ucp.send_iov);
if (UCS_OK != status) {
goto err_free_buffers;
}
perf->ucp.recv_iov = NULL;
status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype,
perf->params.msg_size_cnt,
params->thread_count,
&perf->ucp.recv_iov);
if (UCS_OK != status) {
goto err_free_send_iov_buffers;
}
return UCS_OK;
err_free_send_iov_buffers:
free(perf->ucp.send_iov);
err_free_buffers:
perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
err:
return UCS_ERR_NO_MEMORY;
}
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
free(perf->ucp.recv_iov);
free(perf->ucp.send_iov);
perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
}
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf)
{
unsigned i, thread_count = perf->params.thread_count;
ucs_status_ptr_t *req;
ucs_status_t status;
for (i = 0; i < thread_count; ++i) {
if (perf->ucp.tctx[i].perf.ucp.rkey != NULL) {
ucp_rkey_destroy(perf->ucp.tctx[i].perf.ucp.rkey);
}
if (perf->ucp.tctx[i].perf.ucp.ep != NULL) {
req = ucp_ep_close_nb(perf->ucp.tctx[i].perf.ucp.ep,
UCP_EP_CLOSE_MODE_FLUSH);
if (UCS_PTR_IS_PTR(req)) {
do {
ucp_worker_progress(perf->ucp.tctx[i].perf.ucp.worker);
status = ucp_request_check_status(req);
} while (status == UCS_INPROGRESS);
ucp_request_release(req);
} else if (UCS_PTR_STATUS(req) != UCS_OK) {
ucs_warn("failed to close ep %p on thread %d: %s\n",
perf->ucp.tctx[i].perf.ucp.ep, i,
ucs_status_string(UCS_PTR_STATUS(req)));
}
}
}
}
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
ucs_status_t status)
{
unsigned group_size = rte_call(perf, group_size);
ucs_status_t collective_status = status;
struct iovec vec;
void *req = NULL;
unsigned i;
vec.iov_base = &status;
vec.iov_len = sizeof(status);
rte_call(perf, post_vec, &vec, 1, &req);
rte_call(perf, exchange_vec, req);
for (i = 0; i < group_size; ++i) {
rte_call(perf, recv, i, &status, sizeof(status), req);
if (status != UCS_OK) {
collective_status = status;
}
}
return collective_status;
}
static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf)
{
unsigned thread_count = perf->params.thread_count;
void *rkey_buffer = NULL;
void *req = NULL;
unsigned group_size, group_index, i;
ucx_perf_ep_info_t *remote_info;
ucp_ep_params_t ep_params;
ucp_address_t *address;
ucs_status_t status;
size_t buffer_size;
void *buffer;
group_size = rte_call(perf, group_size);
group_index = rte_call(perf, group_index);
if (group_size != 2) {
ucs_error("perftest requires group size to be exactly 2 "
"(actual group size: %u)", group_size);
return UCS_ERR_UNSUPPORTED;
}
buffer_size = ADDR_BUF_SIZE * thread_count;
buffer = malloc(buffer_size);
if (buffer == NULL) {
ucs_error("failed to allocate RTE receive buffer");
status = UCS_ERR_NO_MEMORY;
goto err;
}
/* Initialize all endpoints and rkeys to NULL to handle error flow */
for (i = 0; i < thread_count; i++) {
perf->ucp.tctx[i].perf.ucp.ep = NULL;
perf->ucp.tctx[i].perf.ucp.rkey = NULL;
}
/* receive the data from the remote peer, extract the address from it
* (along with additional wireup info) and create an endpoint to the peer */
rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req);
remote_info = buffer;
for (i = 0; i < thread_count; i++) {
address = (ucp_address_t*)(remote_info + 1);
rkey_buffer = UCS_PTR_BYTE_OFFSET(address,
remote_info->ucp.worker_addr_len);
perf->ucp.tctx[i].perf.ucp.remote_addr = remote_info->recv_buffer;
ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
ep_params.address = address;
status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params,
&perf->ucp.tctx[i].perf.ucp.ep);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
}
goto err_free_eps_buffer;
}
if (remote_info->rkey_size > 0) {
status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep, rkey_buffer,
&perf->ucp.tctx[i].perf.ucp.rkey);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
}
goto err_free_eps_buffer;
}
} else {
perf->ucp.tctx[i].perf.ucp.rkey = NULL;
}
remote_info = UCS_PTR_BYTE_OFFSET(remote_info,
remote_info->ucp.total_wireup_len);
}
free(buffer);
return UCS_OK;
err_free_eps_buffer:
ucp_perf_test_destroy_eps(perf);
free(buffer);
err:
return status;
}
static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf,
uint64_t features)
{
unsigned i, j, thread_count = perf->params.thread_count;
size_t address_length = 0;
void *rkey_buffer = NULL;
void *req = NULL;
ucx_perf_ep_info_t *info;
ucp_address_t *address;
ucs_status_t status;
struct iovec *vec;
size_t rkey_size;
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
&rkey_buffer, &rkey_size);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
}
goto err;
}
} else {
rkey_size = 0;
}
/* each thread has an iovec with 3 entries to send to the remote peer:
* ep_info, worker_address and rkey buffer */
vec = calloc(3 * thread_count, sizeof(struct iovec));
if (vec == NULL) {
ucs_error("failed to allocate iovec");
status = UCS_ERR_NO_MEMORY;
goto err_rkey_release;
}
/* get the worker address created for every thread and send it to the remote
* peer */
for (i = 0; i < thread_count; i++) {
status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker,
&address, &address_length);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("ucp_worker_get_address() failed: %s",
ucs_status_string(status));
}
goto err_free_workers_vec;
}
vec[i * 3].iov_base = malloc(sizeof(*info));
if (vec[i * 3].iov_base == NULL) {
ucs_error("failed to allocate vec entry for info");
status = UCS_ERR_NO_MEMORY;
ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
goto err_free_workers_vec;
}
info = vec[i * 3].iov_base;
info->ucp.worker_addr_len = address_length;
info->ucp.total_wireup_len = sizeof(*info) + address_length + rkey_size;
info->rkey_size = rkey_size;
info->recv_buffer = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer;
vec[(i * 3) + 0].iov_len = sizeof(*info);
vec[(i * 3) + 1].iov_base = address;
vec[(i * 3) + 1].iov_len = address_length;
vec[(i * 3) + 2].iov_base = rkey_buffer;
vec[(i * 3) + 2].iov_len = info->rkey_size;
address_length = 0;
}
/* send to the remote peer */
rte_call(perf, post_vec, vec, 3 * thread_count, &req);
rte_call(perf, exchange_vec, req);
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
ucp_rkey_buffer_release(rkey_buffer);
}
for (i = 0; i < thread_count; i++) {
free(vec[i * 3].iov_base);
ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker,
vec[(i * 3) + 1].iov_base);
}
free(vec);
return UCS_OK;
err_free_workers_vec:
for (j = 0; j < i; j++) {
        ucp_worker_destroy(perf->ucp.tctx[j].perf.ucp.worker);
}
free(vec);
err_rkey_release:
if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
ucp_rkey_buffer_release(rkey_buffer);
}
err:
return status;
}
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
uint64_t features)
{
ucs_status_t status;
unsigned i;
/* pack the local endpoints data and send to the remote peer */
status = ucp_perf_test_send_local_data(perf, features);
if (status != UCS_OK) {
goto err;
}
/* receive remote peer's endpoints' data and connect to them */
status = ucp_perf_test_receive_remote_data(perf);
if (status != UCS_OK) {
goto err;
}
/* sync status across all processes */
status = ucp_perf_test_exchange_status(perf, UCS_OK);
if (status != UCS_OK) {
goto err_destroy_eps;
}
/* force wireup completion */
for (i = 0; i < perf->params.thread_count; i++) {
status = ucp_worker_flush(perf->ucp.tctx[i].perf.ucp.worker);
if (status != UCS_OK) {
ucs_warn("ucp_worker_flush() failed on theread %d: %s",
i, ucs_status_string(status));
}
}
return status;
err_destroy_eps:
ucp_perf_test_destroy_eps(perf);
err:
(void)ucp_perf_test_exchange_status(perf, status);
return status;
}
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
ucp_perf_barrier(perf);
ucp_perf_test_destroy_eps(perf);
}
static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf)
{
unsigned i;
for (i = 0; i < perf->params.thread_count; i++) {
if (perf->ucp.tctx[i].perf.ucp.worker != NULL) {
ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
}
}
}
static void ucx_perf_set_warmup(ucx_perf_context_t* perf,
const ucx_perf_params_t* params)
{
perf->max_iter = ucs_min(params->warmup_iter,
ucs_div_round_up(params->max_iter, 10));
perf->report_interval = ULONG_MAX;
}
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
uct_component_h *uct_components;
uct_component_attr_t component_attr;
uct_tl_resource_desc_t *tl_resources;
unsigned md_index, num_components;
unsigned tl_index, num_tl_resources;
unsigned cmpt_index;
ucs_status_t status;
uct_md_h md;
uct_md_config_t *md_config;
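    /* walk every component -> memory domain -> transport resource until one
     * matches the requested (tl_name, dev_name) pair; keep that MD open and
     * release everything else */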
status = uct_query_components(&uct_components, &num_components);
if (status != UCS_OK) {
goto out;
}
for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) {
component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT;
status = uct_component_query(uct_components[cmpt_index], &component_attr);
if (status != UCS_OK) {
goto out_release_components_list;
}
component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES;
component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) *
component_attr.md_resource_count);
status = uct_component_query(uct_components[cmpt_index], &component_attr);
if (status != UCS_OK) {
goto out_release_components_list;
}
for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) {
status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL,
&md_config);
if (status != UCS_OK) {
goto out_release_components_list;
}
ucs_strncpy_zero(perf->params.uct.md_name,
component_attr.md_resources[md_index].md_name,
UCT_MD_NAME_MAX);
status = uct_md_open(uct_components[cmpt_index],
component_attr.md_resources[md_index].md_name,
md_config, &md);
uct_config_release(md_config);
if (status != UCS_OK) {
goto out_release_components_list;
}
status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
if (status != UCS_OK) {
uct_md_close(md);
goto out_release_components_list;
}
for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) {
if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) &&
!strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name))
{
uct_release_tl_resource_list(tl_resources);
perf->uct.cmpt = uct_components[cmpt_index];
perf->uct.md = md;
status = UCS_OK;
goto out_release_components_list;
}
}
uct_md_close(md);
uct_release_tl_resource_list(tl_resources);
}
}
ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT,
UCT_PERF_TEST_PARAMS_ARG(&perf->params));
status = UCS_ERR_NO_DEVICE;
out_release_components_list:
uct_release_component_list(uct_components);
out:
return status;
}
void uct_perf_barrier(ucx_perf_context_t *perf)
{
rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
(void*)perf->uct.worker);
}
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
#if _OPENMP
(void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker);
#else
(void*)perf->ucp.tctx[0].perf.ucp.worker);
#endif
}
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
uct_iface_config_t *iface_config;
ucs_status_t status;
uct_iface_params_t iface_params = {
.field_mask = UCT_IFACE_PARAM_FIELD_OPEN_MODE |
UCT_IFACE_PARAM_FIELD_STATS_ROOT |
UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
UCT_IFACE_PARAM_FIELD_CPU_MASK,
.open_mode = UCT_IFACE_OPEN_MODE_DEVICE,
.mode.device.tl_name = params->uct.tl_name,
.mode.device.dev_name = params->uct.dev_name,
.stats_root = ucs_stats_get_root(),
.rx_headroom = 0
};
UCS_CPU_ZERO(&iface_params.cpu_mask);
status = ucs_async_context_init(&perf->uct.async, params->async_mode);
if (status != UCS_OK) {
goto out;
}
status = uct_worker_create(&perf->uct.async, params->thread_mode,
&perf->uct.worker);
if (status != UCS_OK) {
goto out_cleanup_async;
}
status = uct_perf_create_md(perf);
if (status != UCS_OK) {
goto out_destroy_worker;
}
status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
NULL, &iface_config);
if (status != UCS_OK) {
goto out_destroy_md;
}
status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
iface_config, &perf->uct.iface);
uct_config_release(iface_config);
if (status != UCS_OK) {
ucs_error("Failed to open iface: %s", ucs_status_string(status));
goto out_destroy_md;
}
status = uct_perf_test_check_capabilities(params, perf->uct.iface,
perf->uct.md);
/* sync status across all processes */
status = ucp_perf_test_exchange_status(perf, status);
if (status != UCS_OK) {
goto out_iface_close;
}
status = uct_perf_test_alloc_mem(perf);
if (status != UCS_OK) {
goto out_iface_close;
}
    /* Enable progress before `uct_iface_flush` and `uct_worker_progress` are
     * called, to give a chance to finish connection establishment for some
     * transports (ib/ud, tcp). They may return UCS_INPROGRESS from
     * `uct_iface_flush` while connections are in progress */
uct_iface_progress_enable(perf->uct.iface,
UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);
status = uct_perf_test_setup_endpoints(perf);
if (status != UCS_OK) {
ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
goto out_free_mem;
}
return UCS_OK;
out_free_mem:
uct_perf_test_free_mem(perf);
out_iface_close:
uct_iface_close(perf->uct.iface);
out_destroy_md:
uct_md_close(perf->uct.md);
out_destroy_worker:
uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
ucs_async_context_cleanup(&perf->uct.async);
out:
return status;
}
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
uct_perf_test_cleanup_endpoints(perf);
uct_perf_test_free_mem(perf);
uct_iface_close(perf->uct.iface);
uct_md_close(perf->uct.md);
uct_worker_destroy(perf->uct.worker);
ucs_async_context_cleanup(&perf->uct.async);
}
static void ucp_perf_request_init(void *req)
{
ucp_perf_request_t *request = req;
request->context = NULL;
}
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf)
{
ucp_params_t ucp_params;
ucp_worker_params_t worker_params;
ucp_config_t *config;
ucs_status_t status;
unsigned i, thread_count;
size_t message_size;
ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES |
UCP_PARAM_FIELD_REQUEST_SIZE |
UCP_PARAM_FIELD_REQUEST_INIT;
ucp_params.features = 0;
ucp_params.request_size = sizeof(ucp_perf_request_t);
ucp_params.request_init = ucp_perf_request_init;
if (perf->params.thread_count > 1) {
/* when there is more than one thread, a ucp_worker would be created for
* each. all of them will share the same ucp_context */
        ucp_params.field_mask |= UCP_PARAM_FIELD_MT_WORKERS_SHARED;
ucp_params.mt_workers_shared = 1;
}
status = ucp_perf_test_fill_params(&perf->params, &ucp_params);
if (status != UCS_OK) {
goto err;
}
status = ucp_config_read(NULL, NULL, &config);
if (status != UCS_OK) {
goto err;
}
status = ucp_init(&ucp_params, config, &perf->ucp.context);
ucp_config_release(config);
if (status != UCS_OK) {
goto err;
}
thread_count = perf->params.thread_count;
message_size = ucx_perf_get_message_size(&perf->params);
status = ucp_perf_test_alloc_mem(perf);
if (status != UCS_OK) {
ucs_warn("ucp test failed to allocate memory");
goto err_cleanup;
}
perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t));
if (perf->ucp.tctx == NULL) {
ucs_warn("ucp test failed to allocate memory for thread contexts");
        status = UCS_ERR_NO_MEMORY;
        goto err_free_mem;
}
worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
worker_params.thread_mode = perf->params.thread_mode;
for (i = 0; i < thread_count; i++) {
perf->ucp.tctx[i].tid = i;
perf->ucp.tctx[i].perf = *perf;
/* Doctor the src and dst buffers to make them thread specific */
perf->ucp.tctx[i].perf.send_buffer =
UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size);
perf->ucp.tctx[i].perf.recv_buffer =
UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size);
status = ucp_worker_create(perf->ucp.context, &worker_params,
&perf->ucp.tctx[i].perf.ucp.worker);
if (status != UCS_OK) {
goto err_free_tctx_destroy_workers;
}
}
status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
if (status != UCS_OK) {
if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
}
goto err_free_tctx_destroy_workers;
}
return UCS_OK;
err_free_tctx_destroy_workers:
ucp_perf_test_destroy_workers(perf);
free(perf->ucp.tctx);
err_free_mem:
ucp_perf_test_free_mem(perf);
err_cleanup:
ucp_cleanup(perf->ucp.context);
err:
return status;
}
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
ucp_perf_test_cleanup_endpoints(perf);
ucp_perf_barrier(perf);
ucp_perf_test_free_mem(perf);
ucp_perf_test_destroy_workers(perf);
free(perf->ucp.tctx);
ucp_cleanup(perf->ucp.context);
}
static struct {
ucs_status_t (*setup)(ucx_perf_context_t *perf);
void (*cleanup)(ucx_perf_context_t *perf);
ucs_status_t (*run)(ucx_perf_context_t *perf);
void (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
[UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
uct_perf_test_dispatch, uct_perf_barrier},
[UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
ucp_perf_test_dispatch, ucp_perf_barrier}
};
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
ucs_status_t ucx_perf_run(const ucx_perf_params_t *params,
ucx_perf_result_t *result)
{
ucx_perf_context_t *perf;
ucs_status_t status;
ucx_perf_global_init();
if (params->command == UCX_PERF_CMD_LAST) {
ucs_error("Test is not selected");
status = UCS_ERR_INVALID_PARAM;
goto out;
}
if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
ucs_error("Invalid test API parameter (should be UCT or UCP)");
status = UCS_ERR_INVALID_PARAM;
goto out;
}
perf = malloc(sizeof(*perf));
if (perf == NULL) {
status = UCS_ERR_NO_MEMORY;
goto out;
}
ucx_perf_test_init(perf, params);
if (perf->allocator == NULL) {
ucs_error("Unsupported memory types %s<->%s",
ucs_memory_type_names[params->send_mem_type],
ucs_memory_type_names[params->recv_mem_type]);
status = UCS_ERR_UNSUPPORTED;
goto out_free;
}
if ((params->api == UCX_PERF_API_UCT) &&
(perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) {
ucs_warn("UCT tests also copy 2-byte values from %s memory to "
"%s memory, which may impact performance results",
ucs_memory_type_names[perf->allocator->mem_type],
ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]);
}
status = perf->allocator->init(perf);
if (status != UCS_OK) {
goto out_free;
}
status = ucx_perf_funcs[params->api].setup(perf);
if (status != UCS_OK) {
goto out_free;
}
if (params->thread_count == 1) {
if (params->api == UCX_PERF_API_UCP) {
perf->ucp.worker = perf->ucp.tctx[0].perf.ucp.worker;
perf->ucp.ep = perf->ucp.tctx[0].perf.ucp.ep;
perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr;
perf->ucp.rkey = perf->ucp.tctx[0].perf.ucp.rkey;
}
if (params->warmup_iter > 0) {
ucx_perf_set_warmup(perf, params);
status = ucx_perf_funcs[params->api].run(perf);
if (status != UCS_OK) {
goto out_cleanup;
}
ucx_perf_funcs[params->api].barrier(perf);
ucx_perf_test_prepare_new_run(perf, params);
}
/* Run test */
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (status == UCS_OK) {
ucx_perf_calc_result(perf, result);
rte_call(perf, report, result, perf->params.report_arg, 1, 0);
}
} else {
status = ucx_perf_thread_spawn(perf, result);
}
out_cleanup:
ucx_perf_funcs[params->api].cleanup(perf);
out_free:
free(perf);
out:
return status;
}
#if _OPENMP
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; /* a single thread context */
ucx_perf_result_t* result = &tctx->result;
ucx_perf_context_t* perf = &tctx->perf;
ucx_perf_params_t* params = &perf->params;
ucs_status_t status;
if (params->warmup_iter > 0) {
ucx_perf_set_warmup(perf, params);
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (UCS_OK != status) {
goto out;
}
ucx_perf_test_prepare_new_run(perf, params);
}
/* Run test */
#pragma omp barrier
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (UCS_OK != status) {
goto out;
}
ucx_perf_calc_result(perf, result);
out:
return status;
}
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
unsigned i, thread_count = perf->params.thread_count;
    double lat_sum_total_average = 0.0;
ucx_perf_result_t agg_result;
agg_result.iters = tctx[0].result.iters;
agg_result.bytes = tctx[0].result.bytes;
agg_result.elapsed_time = tctx[0].result.elapsed_time;
agg_result.bandwidth.total_average = 0.0;
agg_result.bandwidth.typical = 0.0; /* Undefined since used only for latency calculations */
agg_result.latency.total_average = 0.0;
agg_result.msgrate.total_average = 0.0;
agg_result.msgrate.typical = 0.0; /* Undefined since used only for latency calculations */
/* when running with multiple threads, the moment average value is
* undefined since we don't capture the values of the last iteration */
agg_result.msgrate.moment_average = 0.0;
agg_result.bandwidth.moment_average = 0.0;
agg_result.latency.moment_average = 0.0;
agg_result.latency.typical = 0.0;
/* in case of multiple threads, we have to aggregate the results so that the
* final output of the result would show the performance numbers that were
* collected from all the threads.
* BW and message rate values will be the sum of their values from all
* the threads, while the latency value is the average latency from the
* threads. */
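    /* e.g. two threads reporting 5 GB/s and 7 GB/s at 2us and 3us average
     * latency aggregate to 12 GB/s bandwidth and 2.5us latency */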
for (i = 0; i < thread_count; i++) {
agg_result.bandwidth.total_average += tctx[i].result.bandwidth.total_average;
agg_result.msgrate.total_average += tctx[i].result.msgrate.total_average;
        lat_sum_total_average += tctx[i].result.latency.total_average;
}
    agg_result.latency.total_average = lat_sum_total_average / thread_count;
rte_call(perf, report, &agg_result, perf->params.report_arg, 1, 1);
}
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result)
{
ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
int ti, thread_count = perf->params.thread_count;
    ucs_status_t status;
    omp_set_num_threads(thread_count);
#pragma omp parallel private(ti)
{
ti = omp_get_thread_num();
tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]);
}
status = UCS_OK;
for (ti = 0; ti < thread_count; ti++) {
if (UCS_OK != tctx[ti].status) {
ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
ucs_status_string(tctx[ti].status));
status = tctx[ti].status;
}
}
ucx_perf_thread_report_aggregated_results(perf);
    return status;
}
#else
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result) {
ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
void ucx_perf_global_init()
{
static ucx_perf_allocator_t host_allocator = {
.mem_type = UCS_MEMORY_TYPE_HOST,
.init = ucs_empty_function_return_success,
.ucp_alloc = ucp_perf_test_alloc_host,
.ucp_free = ucp_perf_test_free_host,
.uct_alloc = uct_perf_test_alloc_host,
.uct_free = uct_perf_test_free_host,
.memcpy = ucx_perf_test_memcpy_host,
.memset = memset
};
UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest);
ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator;
/* FIXME Memtype allocator modules must be loaded to global scope, otherwise
* alloc hooks, which are using dlsym() to get pointer to original function,
* do not work. Need to use bistro for memtype hooks to fix it.
*/
UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL);
}
|
5-8t.c | #include <stdio.h>
#include <omp.h>
/* COUNT was undefined in the original file; a value is assumed here so the
 * example compiles */
#define COUNT 16
int main()
{
    int i;
    int sum=0;
    omp_set_num_threads(8);
    /* reduction(+:sum) is required: without it every thread races on the
     * shared sum and the final result is indeterminate */
    #pragma omp parallel for reduction(+:sum)
    for (i=0; i<COUNT; i++)
    {
        sum = sum + i;
        printf("Thread number: %d Iteration: %d Local Sum: %d \n",
               omp_get_thread_num(), i, sum);
    }
    printf("\n All Threads Done - Final Global Sum: %d \n\n", sum);
    return 0;
}
|
atomic-6.c | /* { dg-do compile } */
int x[10], z;
double y[10];
void f1(void)
{
#pragma omp atomic
x[z] /= y[z];
}
|
GB_unaryop__ainv_uint16_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint16_int64
// op(A') function: GB_tran__ainv_uint16_int64
// C type: uint16_t
// A type: int64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint16_int64
(
uint16_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint16_int64
(
GrB_Matrix C,
const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    /* defaults (an assumed problem size) so the variables are well-defined
     * when the benchmark is run without command-line arguments */
    Nx = Ny = Nz = 32 + 2;
    Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 16;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
    // initialize variables (start at index 0 so the boundary layer read by
    // the stencil is defined as well)
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays (left commented out: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
libgomp-292348.c | /*
Test hipcc host compilation with -lgomp, from 292348.
*/
#include <stdio.h>
#include <omp.h>
void inc_subarray(int *array, int start, int end) {
for (int i = start; i <end; i++) {
array[i] += 1;
}
}
void inc_subarray_mt(int *array, int start, int end) {
#ifdef _OPENMP
#pragma omp parallel
{
int num_t = omp_get_num_threads();
int tid = omp_get_thread_num();
int q = (end - start) / num_t + 1;
int s = start + tid * q;
int e = s + q;
e = (e < end) ? e : end;
for (int i = s; i < e; i++) {
array[i] += 1;
}
//printf("tid: %d, num_t: %d\n", tid, num_t);
}
#endif
}
int main(int argc, char *argv[]) {
int num_threads = 0;
int errors = 0;
#pragma omp parallel
{
num_threads = omp_get_num_threads();
}
printf("omp threads: %d\n", num_threads);
int ary[1000] = {0};
int ary_mt[1000] = {0};
for (int i = 0; i < 1000; i++) {
ary[i] = 0;
ary_mt[i] = 0;
}
for (int i = 0; i < 10; i++) {
int start = (i *137) % 1000;
int end = ((i + 1) *279) % 1000;
if (start > end) {
int tmp = start;
start = end;
end = tmp;
}
printf("%d to %d\n", start, end);
inc_subarray_mt(ary_mt, start, end);
inc_subarray(ary, start, end);
}
    for (int i = 0; i < 1000; i++) {
if (ary_mt[i] != ary[i]) {
printf("ary[%d]: %d != %d\n", i, ary[i], ary_mt[i]);fflush(stdout);
errors++;
}
}
if (errors){
printf("FAIL\n");
return 1;
}
printf("PASS\n");
return 0;
}
|
inner_mult.h | #include "CSC.h"
#include "utility.h"
#include "hash_mult_hw.h"
#include <omp.h>
#include <algorithm>
#include <iostream>
using namespace std;
/**
** Count flop of SpGEMM between A and B in CSC format
**/
template <typename IT, typename NT>
long long int get_flop(const CSC<IT,NT> & A, const CSC<IT,NT> & B, IT *maxnnzc)
{
long long int flop = 0; // total flop (multiplication) needed to generate C
#pragma omp parallel
{
long long int tflop=0; //thread private flop
#pragma omp for
for (IT i=0; i < B.cols; ++i) { // for all columns of B
long long int locmax = 0;
for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) { // For all the nonzeros of the ith column
IT inner = B.rowids[j]; // get the row id of B (or column id of A)
IT npins = A.colptr[inner+1] - A.colptr[inner]; // get the number of nonzeros in A's corresponding column
locmax += npins;
}
maxnnzc[i] = locmax;
tflop += locmax;
}
#pragma omp critical
{
flop += tflop;
}
}
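    /* flop above counts multiplications only; doubling it accounts for the
     * matching additions performed when the products are accumulated */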
return flop * 2;
}
template <typename IT, typename NT>
long long int get_flop(const CSC<IT,NT> & A, const CSC<IT,NT> & B)
{
IT *dummy = my_malloc<IT>(B.cols);
long long int flop = get_flop(A, B, dummy);
my_free<IT>(dummy);
return flop;
}
//*TODO:: Dealing with 5 mats. Mask, A, B, C, C_final*
template <bool vectorProbing, bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void
innerSpGEMM_nohash(const CSR<IT,NT> & A, const CSC<IT,NT> & B, CSR<IT,NT> & C_final, const CSR<IT,NT> & M, MultiplyOperation multop, AddOperation addop, unsigned threadCount)
{
CSR<IT,NT> C;
//*A^2*
C.rows = M.rows;
C.cols = M.cols; // B ?=A
C.nnz = M.nnz;
C.zerobased = true;
C.rowptr = my_malloc<IT>(M.rows + 1);
C.colids = my_malloc<IT>(M.nnz);
C.values = my_malloc<NT>(M.nnz);
for (IT i = 0; i < C.rows; ++i)
C.rowptr[i] = M.rowptr[i];
for (IT i = 0; i < C.nnz; ++i) {
C.colids[i] = M.colids[i]; // unnecessary
C.values[i] = 0;
}
BIN<IT, NT> bin(A.rows, IMB_PWMIN, threadCount);
/* Set max bin */
// Double check, changed 3rd param to colptr
bin.set_max_bin(A.rowptr, A.colids, B.colptr, C.rows, C.cols);
IT numThreads;
#pragma omp parallel num_threads(threadCount)
{
numThreads = omp_get_num_threads();
}
vector<IT> th_nnz(numThreads, 0);
vector<IT> rownnz(C.rows, 0);
IT rowPerThread = (M.rows + numThreads -1) / numThreads;
#pragma omp parallel num_threads(threadCount)
{
IT i, start_row, end_row, col;
IT tid;
tid = omp_get_thread_num();
// start_row = bin.rows_offset[tid];
// end_row = bin.rows_offset[tid + 1];
start_row = rowPerThread * tid;
end_row = min(rowPerThread * (tid+1), M.rows);
// each th keeps track of active nnz in C (not all from Mask)
//* blocks of rows the mask *
for (i = start_row; i < end_row; ++i) {
IT j, cur_col, nnz_r, nnz_c;
IT cur_row = i;
NT t_val = 0;
bool active = false;
//* nonzeros of the row over the mask *
for (j = M.rowptr[i]; j < M.rowptr[i + 1]; ++j) {
cur_col = M.colids[j];
nnz_r = A.rowptr[cur_row];
nnz_c = B.colptr[cur_col];
t_val = 0;
active = false;
//*dot product between row of A and col of B
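                    // two-pointer merge: both index lists are sorted, so
                    // advance whichever side lags and accumulate on matches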
while(nnz_r < A.rowptr[cur_row+1] && nnz_c < B.colptr[cur_col+1]){
if(A.colids[nnz_r] < B.rowids[nnz_c])
nnz_r++;
else if(A.colids[nnz_r] > B.rowids[nnz_c])
nnz_c++;
else { //A.colids[nnz_r] == B.rowids[nnz_c];
t_val = addop(t_val, multop(A.values[nnz_r], B.values[nnz_c]));
                        nnz_r++;
                        nnz_c++;
active = true;
}
}
if(active) {// active nnz, shrink output accordingly
IT loc = M.rowptr[start_row] + th_nnz[tid];
C.colids[loc] = M.colids[j];
C.values[loc] = t_val;
th_nnz[tid]++;
rownnz[i]++;
}
}
}
#pragma omp barrier
}
//shrink C
    //* sequentially create global rowptr for the final shrunk C *
    C_final.nnz = 0; /* start from a clean count before summing per-row nnz */
    for (IT i = 0; i < C.rows; ++i)
        C_final.nnz += rownnz[i];
C_final.rows = C.rows;
C_final.cols = C.cols;
C_final.zerobased = true;
C_final.rowptr = my_malloc<IT>(C.rows + 1);
C_final.colids = my_malloc<IT>(C_final.nnz);
C_final.values = my_malloc<NT>(C_final.nnz);
memcpy (C_final.colids, C.colids, th_nnz[0] * sizeof(IT)) ;
memcpy (C_final.values, C.values, th_nnz[0] * sizeof(NT)) ;
IT dest = 0;
for (IT i = 1; i < numThreads; ++i) {
IT loc = min(i * rowPerThread, A.rows);
dest += th_nnz[i-1];
memcpy (C_final.colids + dest, C.colids + M.rowptr[loc], th_nnz[i] * sizeof(C.colids[0]));
memcpy (C_final.values + dest, C.values + M.rowptr[loc], th_nnz[i] * sizeof(C.values[0]));
}
//TODO:: optimize prefix sum
C_final.rowptr[0] = 0;
for (IT i = 1; i <= C_final.rows; ++i) {
C_final.rowptr[i] = C_final.rowptr[i-1] + rownnz[i-1];//A.rowptr[rowPerThread * i];
}
// cout << "Dot SpGEMM with Mask C_final" << endl;
// for (int i = 0; i < 10; ++i){
// cout << i << " : " << C_final.rowptr[i] << " ";
// for (int j = C_final.rowptr[i]; j < C_final.rowptr[i+1]; ++j)
// cout << C_final.colids[j] << " " << C_final.values[j] << ", ";
// cout << endl;
// }
// cout << endl;
C.make_empty();
}
|
RelativeNeighborhoodGraph.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_RNG_H_
#define _SPTAG_COMMON_RNG_H_
#include "NeighborhoodGraph.h"
namespace SPTAG
{
namespace COMMON
{
class RelativeNeighborhoodGraph: public NeighborhoodGraph
{
public:
void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) {
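            // RNG criterion: accept a candidate only if no already-accepted
            // neighbor is closer to it than the candidate is to the query
            // node (item.Dist).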
DimensionType count = 0;
for (int j = 0; j < numResults && count < m_iNeighborhoodSize; j++) {
const BasicResult& item = queryResults[j];
if (item.VID < 0) break;
if (item.VID == node) continue;
bool good = true;
for (DimensionType k = 0; k < count; k++) {
if (index->ComputeDistance(index->GetSample(nodes[k]), index->GetSample(item.VID)) <= item.Dist) {
good = false;
break;
}
}
if (good) nodes[count++] = item.VID;
}
for (DimensionType j = count; j < m_iNeighborhoodSize; j++) nodes[j] = -1;
}
void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist)
{
std::lock_guard<std::mutex> lock(m_dataUpdateLock);
SizeType* nodes = m_pNeighborhoodGraph[node];
SizeType tmpNode;
float tmpDist;
for (DimensionType k = 0; k < m_iNeighborhoodSize; k++)
{
tmpNode = nodes[k];
if (tmpNode < -1) break;
if (tmpNode < 0 || (tmpDist = index->ComputeDistance(index->GetSample(node), index->GetSample(tmpNode))) > insertDist
|| (insertDist == tmpDist && insertNode < tmpNode))
{
bool good = true;
for (DimensionType t = 0; t < k; t++) {
if (index->ComputeDistance(index->GetSample(insertNode), index->GetSample(nodes[t])) < insertDist) {
good = false;
break;
}
}
if (good) {
nodes[k] = insertNode;
while (tmpNode >= 0 && ++k < m_iNeighborhoodSize && nodes[k] >= -1 &&
index->ComputeDistance(index->GetSample(tmpNode), index->GetSample(insertNode)) >=
index->ComputeDistance(index->GetSample(node), index->GetSample(tmpNode)))
{
std::swap(tmpNode, nodes[k]);
}
}
break;
}
}
}
float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
{
DimensionType* correct = new DimensionType[samples];
#pragma omp parallel for schedule(dynamic)
for (SizeType i = 0; i < samples; i++)
{
SizeType x = COMMON::Utils::rand(m_iGraphSize);
//int x = i;
COMMON::QueryResultSet<void> query(nullptr, m_iCEF);
for (SizeType y = 0; y < m_iGraphSize; y++)
{
if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue;
float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y));
query.AddPoint(y, dist);
}
query.SortResult();
SizeType * exact_rng = new SizeType[m_iNeighborhoodSize];
RebuildNeighbors(index, x, exact_rng, query.GetResults(), m_iCEF);
correct[i] = 0;
for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) {
if (exact_rng[j] == -1) {
correct[i] += m_iNeighborhoodSize - j;
break;
}
for (DimensionType k = 0; k < m_iNeighborhoodSize; k++)
if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) {
correct[i]++;
break;
}
}
delete[] exact_rng;
}
float acc = 0;
for (SizeType i = 0; i < samples; i++) acc += float(correct[i]);
acc = acc / samples / m_iNeighborhoodSize;
delete[] correct;
return acc;
}
};
}
}
#endif |
dragonfly4_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* based on rawSHA256_fmt.c code
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* The DragonFly BSD 2.10.1-REL crypt-sha2 hashes are seriously broken. See
* http://www.openwall.com/lists/john-dev/2012/01/16/1
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_dragonfly4_32;
extern struct fmt_main fmt_dragonfly4_64;
#elif FMT_REGISTERS_H
john_register_one(&fmt_dragonfly4_32);
john_register_one(&fmt_dragonfly4_64);
#else
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // tuned on K8-dual HT
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL_32 "dragonfly4-32"
#define FORMAT_LABEL_64 "dragonfly4-64"
#define FORMAT_NAME_32 "DragonFly BSD $4$ w/ bugs, 32-bit"
#define FORMAT_NAME_64 "DragonFly BSD $4$ w/ bugs, 64-bit"
#define FORMAT_TAG "$4$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 84
#define BINARY_SIZE 64
#define BINARY_ALIGN 4
#define USED_BINARY_SIZE 62 // Due to base64 bug in DragonBSD crypt-sha512.c
#define SALT_SIZE_32 (1+4+8) // 1st char is length
#define SALT_SIZE_64 (1+8+8)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests_32[] = {
{"$4$7E48ul$K4u43llx1P184KZBoILl2hnFLBHj6.486TtxWA.EA1pLZuQS7P5k0LQqyEULux47.5vttDbSo/Cbpsez.AUI", "magnum"},
{"$4$Hz$5U1s18ntUYE24mF3JN44BYZPN34HBCMw57.Yw2JeKoiBkTVSGBDZEPT325hvR7iw8QYHy9kG7WUW8LCM.6UD", ""},
{"$4$W$79ddF.iDXVPcf/uf8bMFl15leilo1GE8C2KnEAWs3isK930rVy1EZZS2veHgU17NRt4qpKTtZRCA.QC7.68j", "password"},
{"$4$dw7uRHW$Cs6rbZqAVEEp9dsYOl4w/U84YydqdsEYyxHNvAtd2bcLz2Eem9L7FI/aGD2ayAybmprtYZLq2AtdXBio.cX0", "John the Ripper"},
{"$4$2tgCi76D$zy7ms.v1Y8HcsasTaR8n/Ng8GH4dhPv4ozihbM4JMNSJUmw7wVKbcqksefn7nVT.WrN18fV8i1yh7Gmq.cXC", "DragonFly BSD"},
{NULL}
};
static struct fmt_tests tests_64[] = {
{"$4$7E48ul$9or6.L/T.iChtPIGY4.vIgdYEmMkTW7Ru4OJxtGJtonCQo.wu3.bS4UPlUc2B8CAfGo1Oi5PgQvfhzNQ.A8v", "magnum"},
{"$4$Hz$Mujq0GrjuRtPhcM/0rOfbr2l9fXGfVwKAuL9oL5IH.RnOO1zcgG/S6rSIrebK4g0BEgKGKc0zmWpnk3O..uR", ""},
{"$4$W$.eHqh7OeyhVkBG0lCuUFnEShQq3tZt1QOLUx/9vIt3p56rUMCu2w7iQof7HwWa1pJwcBpPG.7KK3Pcce.oFX", "password"},
{"$4$dw7uRHW$17b2EzV3m0ziCLQoSKzUElTVgkL7cHXQzZzeeuNnkee/bchs0VHGqzjXrMZtWVfK2OW8.GfHvtZgzqGF.IUZ", "John the Ripper"},
{"$4$2tgCi76D$NL8CBWreQkoaVeGVL/a27ZrwYq6M8mlNt.uqc9E9.OiANu6JHdQy2r6J4uAZuD7wKqAQier1YVL7M0IF.gvi", "DragonFly BSD"},
{NULL}
};
static int *saved_len;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)
[(BINARY_SIZE + sizeof(uint32_t) - 1) / sizeof(uint32_t)];
static char *cur_salt;
static int salt_len;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *pos, *start;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ciphertext += FORMAT_TAG_LEN;
for (pos = ciphertext; *pos && *pos != '$'; pos++);
	if (!*pos || pos == ciphertext || pos > &ciphertext[8]) return 0;
start = ++pos;
while (atoi64[ARCH_INDEX(*pos)] != 0x7F) pos++;
if (*pos || pos - start != CIPHERTEXT_LENGTH) return 0;
return 1;
}
#define TO_BINARY(b1, b2, b3) \
value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | \
((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \
((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \
((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \
pos += 4; \
out[b1] = value >> 16; \
out[b2] = value >> 8; \
out[b3] = value;
// Don't copy this code without realising it mimics bugs in the original code!
// We are actually missing the last 16 bits with this implementation.
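// 84 base64 characters hold 63 bytes; the 20 TO_BINARY groups below decode 60
// of them and the final 4-character group only stores out[20] and out[41], so
// just 62 bytes are meaningful - hence USED_BINARY_SIZE above.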
static void *get_binary(char *ciphertext)
{
static uint32_t outbuf[BINARY_SIZE/4];
uint32_t value;
char *pos;
unsigned char *out = (unsigned char*)outbuf;
int i;
memset(outbuf, 0, sizeof(outbuf));
pos = strrchr(ciphertext, '$') + 1;
for (i = 0; i < 20; i++) {
TO_BINARY(i, i + 21, i + 42);
}
value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] |
((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) |
((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) |
((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18);
out[20] = value >> 16;
out[41] = value >> 8;
return (void *)out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_len[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
}
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
SHA512_CTX ctx;
SHA512_Init(&ctx);
/* First the password */
SHA512_Update(&ctx, saved_key[index], saved_len[index]);
/* Then the salt, including the $4$ magic */
SHA512_Update(&ctx, cur_salt, salt_len);
SHA512_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
static void set_salt(void *salt)
{
salt_len = (int)*(char*)salt;
cur_salt = (char*)salt + 1;
}
// For 32-bit version of the bug, our magic is "$4$\0"
static void *get_salt_32(char *ciphertext)
{
static char *out;
int len;
if (!out) out = mem_alloc_tiny(SALT_SIZE_32, MEM_ALIGN_WORD);
memset(out, 0, SALT_SIZE_32);
ciphertext += FORMAT_TAG_LEN;
strcpy(&out[1], FORMAT_TAG);
for (len = 0; ciphertext[len] != '$'; len++);
memcpy(&out[5], ciphertext, len);
out[0] = len + 4;
return out;
}
// For 64-bit version of the bug, our magic is "$4$\0/etc"
static void *get_salt_64(char *ciphertext)
{
static char *out;
int len;
if (!out) out = mem_alloc_tiny(SALT_SIZE_64, MEM_ALIGN_WORD);
memset(out, 0, SALT_SIZE_64);
ciphertext += FORMAT_TAG_LEN;
memcpy(&out[1], "$4$\0/etc", 8);
for (len = 0; ciphertext[len] != '$'; len++);
memcpy(&out[9], ciphertext, len);
out[0] = len + 8;
return out;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], USED_BINARY_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], USED_BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
// Public domain hash function by DJ Bernstein
static int salt_hash(void *salt)
{
unsigned char *s = (unsigned char*)salt + 1;
unsigned int hash = 5381;
unsigned int i;
for (i = 0; i < *(unsigned char*)salt; i++)
hash = ((hash << 5) + hash) ^ s[i];
return hash & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_dragonfly4_32 = {
{
FORMAT_LABEL_32,
FORMAT_NAME_32,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
USED_BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE_32,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests_32
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt_32,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_dragonfly4_64 = {
{
FORMAT_LABEL_64,
FORMAT_NAME_64,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE_64,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ NULL },
tests_64
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt_64,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
blas_server_omp.c | /*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
//#include <sys/mman.h>
#include "common.h"
#ifndef USE_OPENMP
#include "blas_server.c"
#else
#ifndef OMP_SCHED
#define OMP_SCHED static
#endif
int blas_server_avail = 0;
static void * blas_thread_buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER];
#ifdef HAVE_C11
static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#else
static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#endif
void goto_set_num_threads(int num_threads) {
int i=0, j=0;
if (num_threads < 1) num_threads = blas_num_threads;
if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;
if (num_threads > blas_num_threads) {
blas_num_threads = num_threads;
}
blas_cpu_number = num_threads;
omp_set_num_threads(blas_cpu_number);
//adjust buffer for each thread
for(i=0; i<MAX_PARALLEL_NUMBER; i++) {
for(j=0; j<blas_cpu_number; j++){
if(blas_thread_buffer[i][j]==NULL){
blas_thread_buffer[i][j]=blas_memory_alloc(2);
}
}
for(; j<MAX_CPU_NUMBER; j++){
if(blas_thread_buffer[i][j]!=NULL){
blas_memory_free(blas_thread_buffer[i][j]);
blas_thread_buffer[i][j]=NULL;
}
}
}
#if defined(ARCH_MIPS64)
//set parameters for different number of threads.
blas_set_parameter();
#endif
}
void openblas_set_num_threads(int num_threads) {
goto_set_num_threads(num_threads);
}
int blas_thread_init(void){
int i=0, j=0;
blas_get_cpu_number();
blas_server_avail = 1;
for(i=0; i<MAX_PARALLEL_NUMBER; i++) {
for(j=0; j<blas_num_threads; j++){
blas_thread_buffer[i][j]=blas_memory_alloc(2);
}
for(; j<MAX_CPU_NUMBER; j++){
blas_thread_buffer[i][j]=NULL;
}
}
return 0;
}
int BLASFUNC(blas_thread_shutdown)(void){
int i=0, j=0;
blas_server_avail = 0;
for(i=0; i<MAX_PARALLEL_NUMBER; i++) {
for(j=0; j<MAX_CPU_NUMBER; j++){
if(blas_thread_buffer[i][j]!=NULL){
blas_memory_free(blas_thread_buffer[i][j]);
blas_thread_buffer[i][j]=NULL;
}
}
}
return 0;
}
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){
if (!(mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
if (mode & BLAS_XDOUBLE){
/* REAL / Extended Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble,
xdouble *, BLASLONG, xdouble *, BLASLONG,
xdouble *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((xdouble *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else
#endif
if (mode & BLAS_DOUBLE){
/* REAL / Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
double *, BLASLONG, double *, BLASLONG,
double *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((double *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else {
/* REAL / Single */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
float *, BLASLONG, float *, BLASLONG,
float *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((float *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
}
} else {
#ifdef EXPRECISION
if (mode & BLAS_XDOUBLE){
/* COMPLEX / Extended Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble,
xdouble *, BLASLONG, xdouble *, BLASLONG,
xdouble *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((xdouble *)args -> alpha)[0],
((xdouble *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else
#endif
if (mode & BLAS_DOUBLE){
/* COMPLEX / Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double,
double *, BLASLONG, double *, BLASLONG,
double *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((double *)args -> alpha)[0],
((double *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else {
/* COMPLEX / Single */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float,
float *, BLASLONG, float *, BLASLONG,
float *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((float *)args -> alpha)[0],
((float *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
}
}
}
static void exec_threads(blas_queue_t *queue, int buf_index){
void *buffer, *sa, *sb;
int pos=0, release_flag=0;
buffer = NULL;
sa = queue -> sa;
sb = queue -> sb;
#ifdef CONSISTENT_FPCSR
__asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
__asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode));
#endif
if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) {
pos = omp_get_thread_num();
buffer = blas_thread_buffer[buf_index][pos];
//fallback
if(buffer==NULL) {
buffer = blas_memory_alloc(2);
release_flag=1;
}
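        /* carve the per-thread buffer in two: sa points GEMM_OFFSET_A into
         * the buffer, and sb is placed right after the P*Q packing panel
         * (rounded up to GEMM_ALIGN) plus GEMM_OFFSET_B, as computed below */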
if (sa == NULL) {
sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
queue->sa=sa;
}
if (sb == NULL) {
if (!(queue -> mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
if (queue -> mode & BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if (queue -> mode & BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
}
} else {
#ifdef EXPRECISION
if (queue -> mode & BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if (queue -> mode & BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
}
}
queue->sb=sb;
}
}
if (queue -> mode & BLAS_LEGACY) {
legacy_exec(queue -> routine, queue -> mode, queue -> args, sb);
} else
if (queue -> mode & BLAS_PTHREAD) {
void (*pthreadcompat)(void *) = queue -> routine;
(pthreadcompat)(queue -> args);
} else {
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;
(routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
}
if (release_flag) blas_memory_free(buffer);
}
int exec_blas(BLASLONG num, blas_queue_t *queue){
BLASLONG i, buf_index;
if ((num <= 0) || (queue == NULL)) return 0;
#ifdef CONSISTENT_FPCSR
for (i = 0; i < num; i ++) {
__asm__ __volatile__ ("fnstcw %0" : "=m" (queue[i].x87_mode));
__asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode));
}
#endif
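  /* claim one of the MAX_PARALLEL_NUMBER shared buffer sets: spin until some
   * slot's in-use flag can be flipped false -> true (a C11 compare-exchange
   * when available, otherwise a plain flag) */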
while(true) {
for(i=0; i < MAX_PARALLEL_NUMBER; i++) {
#ifdef HAVE_C11
_Bool inuse = false;
if(atomic_compare_exchange_weak(&blas_buffer_inuse[i], &inuse, true)) {
#else
if(blas_buffer_inuse[i] == false) {
blas_buffer_inuse[i] = true;
#endif
buf_index = i;
break;
}
}
if(i != MAX_PARALLEL_NUMBER)
break;
}
#pragma omp parallel for schedule(OMP_SCHED)
for (i = 0; i < num; i ++) {
#ifndef USE_SIMPLE_THREADED_LEVEL3
queue[i].position = i;
#endif
exec_threads(&queue[i], buf_index);
}
#ifdef HAVE_C11
atomic_store(&blas_buffer_inuse[buf_index], false);
#else
blas_buffer_inuse[buf_index] = false;
#endif
return 0;
}
#endif
|
minibatch_kmeans.c | #include "kmeans.h"
#include "kmeans_utils.h"
#include "minibatch_commons.h"
#include "../../utils/matrix/csr_matrix/csr_to_vector_list.h"
#include "../../utils/matrix/vector_list/vector_list_math.h"
#include "../../utils/matrix/csr_matrix/csr_math.h"
#include "../../utils/vector/common/common_vector_math.h"
#include "../../utils/vector/sparse/sparse_vector_math.h"
#include "../../utils/fcl_logging.h"
#include <unistd.h>
#include <float.h>
#include <math.h>
struct kmeans_result* bv_minibatch_kmeans(struct csr_matrix* samples
, struct kmeans_params *prms) {
uint32_t i;
uint64_t j;
uint64_t block_vectors_dim; /* size of block vectors */
uint64_t samples_per_batch;
uint64_t keys_per_block;
uint32_t max_not_improved_counter;
uint32_t disable_optimizations;
VALUE_TYPE desired_bv_annz; /* desired size of the block vectors */
uint32_t* chosen_sample_map;
struct convergence_context conv_ctx;
struct sparse_vector* block_vectors_clusters; /* block vector matrix of clusters */
struct kmeans_result* res;
struct general_kmeans_context ctx;
disable_optimizations = prms->kmeans_algorithm_id == ALGORITHM_MINIBATCH_KMEANS;
initialize_general_context(prms, &ctx, samples);
conv_ctx.initialized = 0;
max_not_improved_counter = 20;
/* if clusters_raw was filled (this happens in kmeans++) free it
* since minibatch k-means uses a different strategy to fill the raw clusters
*/
free_cluster_hashmaps(ctx.clusters_raw, ctx.no_clusters);
    /* reset cluster counts since minibatch kmeans handles them differently */
for (i = 0; i < ctx.no_clusters; i++) ctx.cluster_counts[i] = 0;
desired_bv_annz = d_get_subfloat_default(&(prms->tr)
, "additional_params", "bv_annz", 0.3);
block_vectors_dim = 0;
keys_per_block = 0;
chosen_sample_map = NULL;
/* samples_per_batch = ctx.samples->sample_count; */
samples_per_batch = d_get_subint_default(&(prms->tr)
, "additional_params", "samples_per_batch", ctx.samples->sample_count * 0.05);
if (!disable_optimizations) {
/* search for a suitable size of the block vectors for the input samples and create them */
block_vectors_dim = search_block_vector_size(ctx.samples, desired_bv_annz, prms->verbose);
keys_per_block = ctx.samples->dim / block_vectors_dim;
if (ctx.samples->dim % block_vectors_dim > 0) keys_per_block++;
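/* e.g. (illustrative numbers only) samples->dim = 100 and block_vectors_dim = 8
give keys_per_block = ceil(100 / 8) = 13 original keys folded into each block key */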
/* create block vectors for the clusters */
create_block_vectors_list_from_vector_list(ctx.cluster_vectors
, block_vectors_dim
, ctx.no_clusters
, ctx.samples->dim
, &block_vectors_clusters);
}
create_chosen_sample_map(&chosen_sample_map, ctx.samples->sample_count, samples_per_batch, &(prms->seed));
for (i = 0; i < prms->iteration_limit && !ctx.converged && !prms->stop; i++) {
/* track how many blockvector calculations were made / saved */
uint64_t saved_calculations_bv, saved_calculations_prev_cluster;
uint64_t done_blockvector_calcs, saved_calculations_cauchy;
/* reset all calculation counters */
done_blockvector_calcs = 0;
saved_calculations_cauchy = 0;
saved_calculations_prev_cluster = 0;
saved_calculations_bv = 0;
/* initialize data needed for the iteration */
pre_process_iteration(&ctx);
#pragma omp parallel for schedule(dynamic, 1000) reduction(+: done_blockvector_calcs, saved_calculations_cauchy, saved_calculations_bv, saved_calculations_prev_cluster)
for (j = 0; j < ctx.samples->sample_count; j++) {
/* iterate over all samples */
VALUE_TYPE dist;
uint64_t cluster_id, sample_id;
struct sparse_vector bv;
bv.nnz = 0;
bv.keys = NULL;
bv.values = NULL;
if (!prms->stop && chosen_sample_map[j]) {
sample_id = j;
if (omp_get_thread_num() == 0) check_signals(&(prms->stop));
for (cluster_id = 0; cluster_id < ctx.no_clusters; cluster_id++) {
/* iterate over all cluster centers */
if (!disable_optimizations) {
/* bv_minibatch_kmeans */
/* we already know the distance to the cluster from last iteration */
if (cluster_id == ctx.previous_cluster_assignments[sample_id]) { saved_calculations_prev_cluster += 1; continue; }
/* evaluate cauchy approximation. fast but not good */
dist = lower_bound_euclid(ctx.vector_lengths_clusters[cluster_id]
, ctx.vector_lengths_samples[sample_id]);
if (dist >= ctx.cluster_distances[sample_id]) {
/* approximated distance is larger than current best distance. skip full distance calculation */
saved_calculations_cauchy += 1;
goto end;
}
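/* Why the skip above is safe (sketch): by Cauchy-Schwarz,
||c - s||^2 = ||c||^2 + ||s||^2 - 2<c,s> >= (||c|| - ||s||)^2, so the true
distance is bounded below by | ||c|| - ||s|| |; presumably lower_bound_euclid
computes exactly that from the two stored vector lengths (an assumption about
its internals, consistent with its arguments). */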
if (bv.keys == NULL) {
create_block_vector_from_csr_matrix_vector(ctx.samples
, sample_id
, keys_per_block
, &bv);
}
/* evaluate block vector approximation. */
dist = euclid_vector(bv.keys, bv.values, bv.nnz
, block_vectors_clusters[cluster_id].keys
, block_vectors_clusters[cluster_id].values
, block_vectors_clusters[cluster_id].nnz
, ctx.vector_lengths_samples[sample_id]
, ctx.vector_lengths_clusters[cluster_id]);
done_blockvector_calcs += 1;
if (dist >= ctx.cluster_distances[sample_id] && fabs(dist - ctx.cluster_distances[sample_id]) >= 1e-6) {
/* approximated distance is larger than current best distance. skip full distance calculation */
saved_calculations_bv += 1;
goto end;
}
}
/* if we reached this point we need to calculate a full euclidean distance */
dist = euclid_vector_list(ctx.samples, sample_id, ctx.cluster_vectors, cluster_id
, ctx.vector_lengths_samples, ctx.vector_lengths_clusters);
ctx.done_calculations += 1;
if (dist < ctx.cluster_distances[sample_id]) {
/* replace current best distance with new distance */
ctx.cluster_distances[sample_id] = dist;
ctx.cluster_assignments[sample_id] = cluster_id;
}
end:;
}
}
if (!disable_optimizations) {
free_null(bv.keys);
free_null(bv.values);
}
}
check_signals(&(prms->stop));
post_process_iteration_minibatch(&ctx
, chosen_sample_map
, max_not_improved_counter
, &conv_ctx);
/* shift clusters to new position */
calculate_shifted_clusters_minibatch_kmeans(&ctx, chosen_sample_map);
/* calculate_shifted_clusters(&ctx); */
switch_to_shifted_clusters(&ctx);
create_chosen_sample_map(&chosen_sample_map, ctx.samples->sample_count, samples_per_batch, &(prms->seed));
if (!disable_optimizations) {
/* update only block vectors for cluster that shifted */
update_changed_blockvectors(ctx.cluster_vectors
, block_vectors_dim
, ctx.no_clusters
, ctx.samples->dim
, ctx.clusters_not_changed
, block_vectors_clusters);
d_add_ilist(&(prms->tr), "iteration_bv_calcs", done_blockvector_calcs);
d_add_ilist(&(prms->tr), "iteration_bv_calcs_success", saved_calculations_bv + saved_calculations_cauchy);
}
#pragma omp parallel for
for (j = 0; j < ctx.samples->sample_count; j++) {
/* iterate over all chosen samples for the next iteration and
* update their distance to their current cluster
*/
if (chosen_sample_map[j]) {
ctx.cluster_distances[j]
= euclid_vector_list(ctx.samples, j
, ctx.cluster_vectors, ctx.cluster_assignments[j]
, ctx.vector_lengths_samples
, ctx.vector_lengths_clusters);
/*#pragma omp critical*/
ctx.done_calculations += 1;
ctx.total_no_calcs += 1;
}
}
print_iteration_summary(&ctx, prms, i);
/* print block vector statistics */
if (prms->verbose) LOG_INFO("BV statistics c:%" PRINTF_INT64_MODIFIER "u/b:%" PRINTF_INT64_MODIFIER "u/db:%" PRINTF_INT64_MODIFIER "u/pc:%" PRINTF_INT64_MODIFIER "u"
, saved_calculations_cauchy
, saved_calculations_bv
, done_blockvector_calcs
, saved_calculations_prev_cluster);
}
if (prms->verbose) LOG_INFO("total total_no_calcs = %" PRINTF_INT64_MODIFIER "u", ctx.total_no_calcs);
res = create_kmeans_result(prms, &ctx);
/* cleanup all */
if (!disable_optimizations) {
free_vector_list(block_vectors_clusters, ctx.no_clusters);
free(block_vectors_clusters);
}
free_null(chosen_sample_map);
free_general_context(&ctx, prms);
return res;
}
|
irbuilder_unroll_partial_heuristic_for_collapse.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
double sind(double);
void unroll_partial_heuristic_for(int m, float *a, float *b, float *c, float *d, float *e, float offset) {
#pragma omp for collapse(2)
for (int i = 0; i < m; i++) {
#pragma omp unroll partial
for (int j = 0; j < 8; j++) {
a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;
}
}
}
#endif // HEADER
// CHECK-LABEL: define {{[^@]+}}@unroll_partial_heuristic_for
// CHECK-SAME: (i32 noundef [[M:%.*]], float* noundef [[A:%.*]], float* noundef [[B:%.*]], float* noundef [[C:%.*]], float* noundef [[D:%.*]], float* noundef [[E:%.*]], float noundef [[OFFSET:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[OFFSET_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTUNROLLED_IV_J:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I6:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTUNROLLED_IV_J7:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTUNROLL_INNER_IV_J:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[M]], i32* [[M_ADDR]], align 4
// CHECK-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK-NEXT: store float* [[E]], float** [[E_ADDR]], align 8
// CHECK-NEXT: store float [[OFFSET]], float* [[OFFSET_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[M_ADDR]], align 4
// CHECK-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT: store i32 0, i32* [[J]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
// CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], 4
// CHECK-NEXT: [[SUB3:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK-NEXT: store i64 [[SUB3]], i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: store i32 0, i32* [[I]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTUNROLLED_IV_J]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK: omp.precond.then:
// CHECK-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: store i64 [[TMP3]], i64* [[DOTOMP_UB]], align 8
// CHECK-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i64 [[TMP4]], [[TMP5]]
// CHECK-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i64 [ [[TMP6]], [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
// CHECK-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP11]], 4
// CHECK-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL13]]
// CHECK-NEXT: [[CONV14:%.*]] = trunc i64 [[ADD]] to i32
// CHECK-NEXT: store i32 [[CONV14]], i32* [[I6]], align 4
// CHECK-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[DIV15:%.*]] = sdiv i64 [[TMP13]], 4
// CHECK-NEXT: [[MUL16:%.*]] = mul nsw i64 [[DIV15]], 4
// CHECK-NEXT: [[SUB17:%.*]] = sub nsw i64 [[TMP12]], [[MUL16]]
// CHECK-NEXT: [[MUL18:%.*]] = mul nsw i64 [[SUB17]], 2
// CHECK-NEXT: [[ADD19:%.*]] = add nsw i64 0, [[MUL18]]
// CHECK-NEXT: [[CONV20:%.*]] = trunc i64 [[ADD19]] to i32
// CHECK-NEXT: store i32 [[CONV20]], i32* [[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT: store i32 [[TMP14]], i32* [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT: [[ADD21:%.*]] = add nsw i32 [[TMP16]], 2
// CHECK-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP15]], [[ADD21]]
// CHECK-NEXT: br i1 [[CMP22]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK: land.rhs:
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP17]], 8
// CHECK-NEXT: br label [[LAND_END]]
// CHECK: land.end:
// CHECK-NEXT: [[TMP18:%.*]] = phi i1 [ false, [[FOR_COND]] ], [ [[CMP24]], [[LAND_RHS]] ]
// CHECK-NEXT: br i1 [[TMP18]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
// CHECK-NEXT: store i32 [[ADD27]], i32* [[J]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[I6]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT: [[TMP22:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK-NEXT: [[CONV28:%.*]] = fpext float [[TMP22]] to double
// CHECK-NEXT: [[CALL:%.*]] = call double @sind(double noundef [[CONV28]])
// CHECK-NEXT: [[TMP23:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, i32* [[I6]], align 4
// CHECK-NEXT: [[IDXPROM29:%.*]] = sext i32 [[TMP24]] to i64
// CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM29]]
// CHECK-NEXT: [[TMP25:%.*]] = load float, float* [[ARRAYIDX30]], align 4
// CHECK-NEXT: [[CONV31:%.*]] = fpext float [[TMP25]] to double
// CHECK-NEXT: [[MUL32:%.*]] = fmul double [[CALL]], [[CONV31]]
// CHECK-NEXT: [[TMP26:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = load i32, i32* [[I6]], align 4
// CHECK-NEXT: [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
// CHECK-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds float, float* [[TMP26]], i64 [[IDXPROM33]]
// CHECK-NEXT: [[TMP28:%.*]] = load float, float* [[ARRAYIDX34]], align 4
// CHECK-NEXT: [[CONV35:%.*]] = fpext float [[TMP28]] to double
// CHECK-NEXT: [[MUL36:%.*]] = fmul double [[MUL32]], [[CONV35]]
// CHECK-NEXT: [[TMP29:%.*]] = load float*, float** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP30:%.*]] = load i32, i32* [[I6]], align 4
// CHECK-NEXT: [[IDXPROM37:%.*]] = sext i32 [[TMP30]] to i64
// CHECK-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds float, float* [[TMP29]], i64 [[IDXPROM37]]
// CHECK-NEXT: [[TMP31:%.*]] = load float, float* [[ARRAYIDX38]], align 4
// CHECK-NEXT: [[CONV39:%.*]] = fpext float [[TMP31]] to double
// CHECK-NEXT: [[MUL40:%.*]] = fmul double [[MUL36]], [[CONV39]]
// CHECK-NEXT: [[TMP32:%.*]] = load float, float* [[OFFSET_ADDR]], align 4
// CHECK-NEXT: [[CONV41:%.*]] = fpext float [[TMP32]] to double
// CHECK-NEXT: [[ADD42:%.*]] = fadd double [[MUL40]], [[CONV41]]
// CHECK-NEXT: [[TMP33:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP34:%.*]] = load i32, i32* [[I6]], align 4
// CHECK-NEXT: [[IDXPROM43:%.*]] = sext i32 [[TMP34]] to i64
// CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds float, float* [[TMP33]], i64 [[IDXPROM43]]
// CHECK-NEXT: [[TMP35:%.*]] = load float, float* [[ARRAYIDX44]], align 4
// CHECK-NEXT: [[CONV45:%.*]] = fpext float [[TMP35]] to double
// CHECK-NEXT: [[ADD46:%.*]] = fadd double [[CONV45]], [[ADD42]]
// CHECK-NEXT: [[CONV47:%.*]] = fptrunc double [[ADD46]] to float
// CHECK-NEXT: store float [[CONV47]], float* [[ARRAYIDX44]], align 4
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP36]], 1
// CHECK-NEXT: store i32 [[INC]], i32* [[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK: for.end:
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP37:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK-NEXT: [[ADD48:%.*]] = add nsw i64 [[TMP37]], 1
// CHECK-NEXT: store i64 [[ADD48]], i64* [[DOTOMP_IV]], align 8
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM49:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB5:[0-9]+]])
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM49]])
// CHECK-NEXT: br label [[OMP_PRECOND_END]]
// CHECK: omp.precond.end:
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM50:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB7:[0-9]+]])
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB6:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM50]])
// CHECK-NEXT: ret void
//
|
integral_parallel3.c | #include<stdio.h>
#include<omp.h>
static long num_steps = 100000;
double step;
int main(){
int i;
double x, pi, sum = 0.0, init_time, finish_time;
init_time = omp_get_wtime();
step = 1.0 / (double)num_steps;
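/* midpoint-rule quadrature: pi = integral_0^1 4/(1+x^2) dx
~= step * sum_i 4/(1 + x_i^2) with x_i = (i + 0.5)*step */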
#pragma omp parallel for private(x) reduction(+:sum)
for (i=0; i<num_steps; i++){
x = (i+0.5)*step;
sum = sum + 4.0/(1.0+x*x);
}
finish_time = omp_get_wtime()-init_time;
pi = step * sum;
printf("PI = %f\n", pi);
printf("Time = %f\n", finish_time);
return 0;
}
|
builder.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef BUILDER_H_
#define BUILDER_H_
#include <algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>
#include "timer.h"
#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "mgraph.h"
/*
GAP Benchmark Suite
Class: BuilderBase
Author: Scott Beamer
Given arguments from the command line (cli), returns a built graph
- MakeGraph() will parse cli and obtain edgelist and call
MakeGraphFromEL(edgelist) to perform actual graph construction
- edgelist can be from file (reader) or synthetically generated (generator)
- Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/
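/* Typical driver code, as a sketch (assumes a CLBase-derived `cli` and the
Graph/Builder typedefs from benchmark.h; note this variant's MakeGraph fills a
caller-provided graph rather than returning one):
Builder b(cli);
Graph g;
b.MakeGraph(g);
*/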
template <typename VertexID_, typename DestID_ = VertexID_, typename WeightT_ = VertexID_, bool invert = true>
class BuilderBase {
typedef EdgePair<VertexID_, DestID_> Edge;
typedef pvector<Edge> EdgeList;
const CLBase &cli_;
bool symmetrize_;
bool needs_weights_;
int64_t num_vertices_ = -1;
public:
explicit BuilderBase(const CLBase &cli) : cli_(cli) {
symmetrize_ = cli_.symmetrize();
if (symmetrize_) printf("Building a symmetrized graph\n");
needs_weights_ = !std::is_same<VertexID_, DestID_>::value;
}
DestID_ GetSource(EdgePair<VertexID_, VertexID_> e) {
return e.u;
}
DestID_ GetSource(EdgePair<VertexID_, VertexWeight<VertexID_, WeightT_>> e) {
return VertexWeight<VertexID_, WeightT_>(e.u, e.v.w);
}
VertexID_ FindMaxVertexID(const EdgeList &el) {
VertexID_ max_seen = 0;
#pragma omp parallel for reduction(max : max_seen)
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
max_seen = std::max(max_seen, e.u);
max_seen = std::max(max_seen, (VertexID_) e.v);
}
return max_seen;
}
pvector<VertexID_> CountDegrees(const EdgeList &el, bool transpose) {
pvector<VertexID_> degrees(num_vertices_, 0);
#pragma omp parallel for
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
if (symmetrize_ || (!symmetrize_ && !transpose))
fetch_and_add(degrees[e.u], 1);
if (symmetrize_ || (!symmetrize_ && transpose))
fetch_and_add(degrees[(VertexID_) e.v], 1);
}
return degrees;
}
static pvector<SGOffset> PrefixSum(const pvector<VertexID_> °rees) {
pvector<SGOffset> sums(degrees.size() + 1);
SGOffset total = 0;
for (size_t n=0; n < degrees.size(); n++) {
sums[n] = total;
total += degrees[n];
}
sums[degrees.size()] = total;
return sums;
}
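// e.g. degrees = [2, 1, 3] -> sums = [0, 2, 3, 6]: an exclusive prefix sum
// whose extra final entry is the total, i.e. the size of the neighbor array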
static pvector<SGOffset> ParallelPrefixSum(const pvector<VertexID_> °rees) {
const size_t block_size = 1<<20;
const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
pvector<SGOffset> local_sums(num_blocks);
#pragma omp parallel for
for (size_t block=0; block < num_blocks; block++) {
SGOffset lsum = 0;
size_t block_end = std::min((block + 1) * block_size, degrees.size());
for (size_t i=block * block_size; i < block_end; i++)
lsum += degrees[i];
local_sums[block] = lsum;
}
pvector<SGOffset> bulk_prefix(num_blocks+1);
SGOffset total = 0;
for (size_t block=0; block < num_blocks; block++) {
bulk_prefix[block] = total;
total += local_sums[block];
}
bulk_prefix[num_blocks] = total;
pvector<SGOffset> prefix(degrees.size() + 1);
#pragma omp parallel for
for (size_t block=0; block < num_blocks; block++) {
SGOffset local_total = bulk_prefix[block];
size_t block_end = std::min((block + 1) * block_size, degrees.size());
for (size_t i=block * block_size; i < block_end; i++) {
prefix[i] = local_total;
local_total += degrees[i];
}
}
prefix[degrees.size()] = bulk_prefix[num_blocks];
return prefix;
}
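// The three phases above: (1) per-block partial sums in parallel, (2) a serial
// exclusive scan over the num_blocks partial sums, (3) a parallel pass that
// rescans each block starting from its scanned base. For example, with
// degrees = [2,1,3,4] and block_size = 2: local_sums = [3,7],
// bulk_prefix = [0,3,10], prefix = [0,2,3,6,10].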
// Removes self-loops and redundant edges
// Side effect: neighbor IDs will be sorted
void OldSquishCSR(const CSRGraph<VertexID_, DestID_, invert> &g, bool transpose, DestID_*** sq_index, DestID_** sq_neighs) {
pvector<VertexID_> diffs(g.num_vertices());
DestID_ *n_start, *n_end;
#pragma omp parallel for private(n_start, n_end)
for (VertexID_ n=0; n < g.num_vertices(); n++) {
if (transpose) {
n_start = g.in_neigh(n).begin();
n_end = g.in_neigh(n).end();
} else {
n_start = g.out_neigh(n).begin();
n_end = g.out_neigh(n).end();
}
std::sort(n_start, n_end);
DestID_ *new_end = std::unique(n_start, n_end);
new_end = std::remove(n_start, new_end, n);
diffs[n] = new_end - n_start;
}
pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
*sq_neighs = new DestID_[sq_offsets[g.num_vertices()]];
*sq_index = GenIndex<VertexID_, DestID_>(sq_offsets, *sq_neighs);
//*sq_index = CSRGraph<VertexID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
#pragma omp parallel for private(n_start)
for (VertexID_ n=0; n < g.num_vertices(); n++) {
if (transpose)
n_start = g.in_neigh(n).begin();
else
n_start = g.out_neigh(n).begin();
std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
}
}
void SquishCSR(const CSRGraph<VertexID_, DestID_, invert> &g, bool transpose, int** sq_rowptr, DestID_*** sq_index, DestID_** sq_neighs) {
pvector<VertexID_> diffs(g.num_vertices());
DestID_ *n_start, *n_end;
#pragma omp parallel for private(n_start, n_end)
for (VertexID_ n=0; n < g.num_vertices(); n++) {
if (transpose) {
n_start = g.in_neigh(n).begin();
n_end = g.in_neigh(n).end();
} else {
n_start = g.out_neigh(n).begin();
n_end = g.out_neigh(n).end();
}
std::sort(n_start, n_end);
DestID_ *new_end = std::unique(n_start, n_end);
new_end = std::remove(n_start, new_end, n);
diffs[n] = new_end - n_start;
}
pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
*sq_neighs = new DestID_[sq_offsets[g.num_vertices()]];
*sq_rowptr = new int[num_vertices_+1]; // sq_rowptr is an int*, not a DestID_*
*sq_index = GenIndex<VertexID_, DestID_>(sq_offsets, *sq_neighs);
//*sq_index = CSRGraph<VertexID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
for (int i = 0; i < num_vertices_+1; i ++) (*sq_rowptr)[i] = sq_offsets[i];
#pragma omp parallel for private(n_start)
for (VertexID_ n=0; n < g.num_vertices(); n++) {
if (transpose)
n_start = g.in_neigh(n).begin();
else
n_start = g.out_neigh(n).begin();
std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
}
}
void SquishGraph(CSRGraph<VertexID_, DestID_, invert>& new_g, const CSRGraph<VertexID_, DestID_, invert> &g) {
DestID_ **out_index = nullptr, *out_neighs = nullptr, **in_index = nullptr, *in_neighs = nullptr;
int *in_rowptr = nullptr, *out_rowptr = nullptr; // null so the !invert path never passes garbage to Setup()
int64_t m = g.num_vertices();
int64_t nnz = g.num_edges();
printf("Before cleaning: num_vertices %" PRId64 " num_edges %" PRId64 "\n", m, nnz);
SquishCSR(g, false, &out_rowptr, &out_index, &out_neighs);
if (g.directed()) {
if (invert) SquishCSR(g, true, &in_rowptr, &in_index, &in_neighs);
new_g.Setup(g.num_vertices(), out_rowptr, out_index, out_neighs, in_rowptr, in_index, in_neighs);
} else new_g.Setup(g.num_vertices(), out_rowptr, out_index, out_neighs);
}
/*
Graph Bulding Steps (for CSR):
- Read edgelist once to determine vertex degrees (CountDegrees)
- Determine vertex offsets by a prefix sum (ParallelPrefixSum)
- Allocate storage and set points according to offsets (GenIndex)
- Copy edges into storage
*/
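// Worked example (illustrative): edge list {(0,1),(0,2),(2,1)} on 3 vertices,
// no symmetrization, no transpose: degrees = [2,0,1], offsets = [0,2,2,3], and
// the copy phase scatters neighs = [1,2,1], bumping offsets via fetch_and_add.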
void OldMakeCSR(const EdgeList &el, bool transpose, DestID_*** index, DestID_** neighs) {
pvector<VertexID_> degrees = CountDegrees(el, transpose);
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
*neighs = new DestID_[offsets[num_vertices_]];
*index = GenIndex<VertexID_, DestID_>(offsets, *neighs);
//*index = CSRGraph<VertexID_, DestID_>::GenIndex(offsets, *neighs);
#pragma omp parallel for
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
if (symmetrize_ || (!symmetrize_ && !transpose))
(*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
if (symmetrize_ || (!symmetrize_ && transpose))
(*neighs)[fetch_and_add(offsets[static_cast<VertexID_>(e.v)], 1)] = GetSource(e);
}
}
void MakeCSR(const EdgeList &el, bool transpose, int** rowptr, DestID_*** index, DestID_** neighs) {
pvector<VertexID_> degrees = CountDegrees(el, transpose);
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
*neighs = new DestID_[offsets[num_vertices_]];
*index = GenIndex<VertexID_, DestID_>(offsets, *neighs);
//*index = CSRGraph<VertexID_, DestID_>::GenIndex(offsets, *neighs);
*rowptr = new int[num_vertices_+1];
for (int i = 0; i < num_vertices_+1; i ++) (*rowptr)[i] = offsets[i];
#pragma omp parallel for
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
if (symmetrize_ || (!symmetrize_ && !transpose))
(*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
if (symmetrize_ || (!symmetrize_ && transpose))
(*neighs)[fetch_and_add(offsets[static_cast<VertexID_>(e.v)], 1)] = GetSource(e);
}
}
void MakeGraphFromEL(EdgeList &el, CSRGraph<VertexID_, DestID_, invert> &g, bool use_dag = false) {
int *rowptr = nullptr, *inv_rowptr = nullptr;
DestID_ **index = nullptr, **inv_index = nullptr;
DestID_ *neighs = nullptr, *inv_neighs = nullptr;
Timer t;
t.Start();
if (num_vertices_ == -1)
num_vertices_ = FindMaxVertexID(el)+1;
if (needs_weights_)
Generator<VertexID_, DestID_, WeightT_>::InsertWeights(el);
MakeCSR(el, false, &rowptr, &index, &neighs);
if (!symmetrize_ && invert)
MakeCSR(el, true, &inv_rowptr, &inv_index, &inv_neighs);
t.Stop();
PrintTime("Build Time", t.Seconds());
if (symmetrize_)
g.Setup(num_vertices_, rowptr, index, neighs);
else
g.Setup(num_vertices_, rowptr, index, neighs, inv_rowptr, inv_index, inv_neighs);
}
void MakeGraph(CSRGraph<VertexID_, DestID_, invert>& new_g, bool use_dag = false) {
CSRGraph<VertexID_, DestID_, invert> g;
{ // extra scope to trigger earlier deletion of el (save memory)
EdgeList el;
if (cli_.filename() != "") {
Reader<VertexID_, DestID_, WeightT_, invert> r(cli_.filename());
if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg"))
r.ReadSerializedGraph(g);
else el = r.ReadFile(needs_weights_);
} else if (cli_.scale() != -1) {
Generator<VertexID_, DestID_> gen(cli_.scale(), cli_.degree());
el = gen.GenerateEL(cli_.uniform());
}
MakeGraphFromEL(el, g, use_dag);
}
SquishGraph(new_g, g);
}
// Relabels (and rebuilds) graph by order of decreasing degree
static
CSRGraph<VertexID_, DestID_, invert> RelabelByDegree(
const CSRGraph<VertexID_, DestID_, invert> &g) {
if (g.directed()) {
std::cout << "Cannot relabel directed graph" << std::endl;
std::exit(-11);
}
Timer t;
t.Start();
typedef std::pair<int64_t, VertexID_> degree_node_p;
pvector<degree_node_p> degree_id_pairs(g.num_vertices());
#pragma omp parallel for
for (VertexID_ n=0; n < g.num_vertices(); n++)
degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
std::greater<degree_node_p>());
pvector<VertexID_> degrees(g.num_vertices());
pvector<VertexID_> new_ids(g.num_vertices());
#pragma omp parallel for
for (VertexID_ n=0; n < g.num_vertices(); n++) {
degrees[n] = degree_id_pairs[n].first;
new_ids[degree_id_pairs[n].second] = n;
}
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
DestID_* neighs = new DestID_[offsets[g.num_vertices()]];
DestID_** index = GenIndex<VertexID_, DestID_>(offsets, neighs);
//DestID_** index = CSRGraph<VertexID_, DestID_>::GenIndex(offsets, neighs);
#pragma omp parallel for
for (VertexID_ u=0; u < g.num_vertices(); u++) {
for (VertexID_ v : g.out_neigh(u))
neighs[offsets[new_ids[u]]++] = new_ids[v];
std::sort(index[new_ids[u]], index[new_ids[u]+1]);
}
t.Stop();
PrintTime("Relabel", t.Seconds());
return CSRGraph<VertexID_, DestID_, invert>(g.num_vertices(), index, neighs);
}
};
typedef BuilderBase<VertexID, VertexID, WeightT> Builder;
typedef BuilderBase<VertexID, WVertex, WeightT> WeightedBuilder;
#endif // BUILDER_H_
|
SnoopFilterMapper.c | // John D. McCalpin, mccalpin@tacc.utexas.edu
static char const rcsid[] = "$Id: SnoopFilterMapper.c,v 1.11 2018/05/17 22:24:58 mccalpin Exp mccalpin $";
// include files
#include <stdio.h> // printf, etc
#include <stdint.h> // standard integer types, e.g., uint32_t
#include <signal.h> // for signal handler
#include <stdlib.h> // exit() and EXIT_FAILURE
#include <string.h> // strerror() function converts errno to a text string for printing
#include <fcntl.h> // for open()
#include <errno.h> // errno support
#include <assert.h> // assert() function
#include <unistd.h> // sysconf() function, sleep() function
#include <sys/mman.h> // support for mmap() function
#include <linux/mman.h> // required for 1GiB page support in mmap()
#include <math.h> // for pow() function used in RAPL computations
#include <time.h>
#include <sys/time.h> // for gettimeofday
# define ARRAYSIZE 2147483648L
// MYHUGEPAGE_1GB overrides default of 2MiB for hugepages
#if defined MYHUGEPAGE_1GB
#define MYPAGESIZE 1073741824UL
#define NUMPAGES 2L
#define PAGES_MAPPED 2L // this is still specifying how many 2MiB pages to map
#else
#define MYPAGESIZE 2097152L
#define NUMPAGES 1024L
#define PAGES_MAPPED 14L
#endif
#define SPECIAL_VALUE (-1)
// interfaces for va2pa_lib.c
void print_pagemap_entry(unsigned long long pagemap_entry);
unsigned long long get_pagemap_entry( void * va );
int dumpall; // when set to 1, will cause dump of lots of stuff for debugging
int report;
int nwraps; // track number of performance counter wraps
double *array; // array pointer to mmap on 1GiB pages
double *page_pointers[NUMPAGES]; // one pointer for each page allocated
uint64_t pageframenumber[NUMPAGES]; // one PFN entry for each page allocated
// constant value defines
# define NUM_SOCKETS 2 // two-socket node
# define NUM_IMC_CHANNELS 6 // includes channels on all IMCs in a socket
# define NUM_IMC_COUNTERS 5 // 0-3 are the 4 programmable counters, 4 is the fixed-function DCLK counter
# define NUM_CHA_BOXES 28
# define NUM_CHA_USED 28
# define NUM_CHA_COUNTERS 4
long imc_counts[NUM_SOCKETS][NUM_IMC_CHANNELS][NUM_IMC_COUNTERS][2]; // including the fixed-function (DCLK) counter as the final entry
long imc_pkg_sums[NUM_SOCKETS][NUM_IMC_COUNTERS]; // sum across channels for each chip
char imc_event_name[NUM_SOCKETS][NUM_IMC_CHANNELS][NUM_IMC_COUNTERS][32]; // reserve 32 characters for the IMC event names for each socket, channel, counter
uint32_t imc_perfevtsel[NUM_IMC_COUNTERS]; // expected control settings for the counters
uint32_t imc_vid_did[3]; // PCIe configuration space vendor and device IDs for the IMC blocks
long cha_counts[NUM_SOCKETS][NUM_CHA_BOXES][NUM_CHA_COUNTERS][2]; // 2 sockets, 28 tiles per socket, 4 counters per tile, 2 times (before and after)
uint32_t cha_perfevtsel[NUM_CHA_COUNTERS];
long cha_pkg_sums[NUM_SOCKETS][NUM_CHA_COUNTERS];
#define MAXCORES 112
#define CORES_USED 24
// New feature -- core counters.
// upgrade to include counters for all cores
long core_counters[MAXCORES][4][2]; // 24 cores & 24 threads on one socket, 4 counters, before and after
long fixed_counters[MAXCORES][4][2]; // 24 cores with 4 fixed-function core counters (Instr, CoreCyc, RefCyc, TSC)
long core_pkg_sums[NUM_SOCKETS][4]; // four core counters
long fixed_pkg_sums[NUM_SOCKETS][4]; // four fixed-function counters per core (Instr, CoreCyc, RefCyc, TSC)
int8_t cha_by_page[PAGES_MAPPED][32768]; // L3 numbers for each of the 32,768 cache lines in each of the first PAGES_MAPPED 2MiB pages
uint64_t paddr_by_page[PAGES_MAPPED]; // physical addresses of the base of each of the first PAGES_MAPPED 2MiB pages used
long lines_by_cha[NUM_CHA_USED]; // bulk count of lines assigned to each CHA
#ifdef DEBUG
FILE *log_file; // log file for debugging -- should not be needed in production
#endif
unsigned int *mmconfig_ptr; // must be pointer to 32-bit int so compiler will generate 32-bit loads and stores
struct timeval tp; // seconds and microseconds from gettimeofday
struct timezone tzp; // required, but not used here.
double ssum(double *a, long vl);
double mysecond()
{
struct timeval tp;
struct timezone tzp;
gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#include "low_overhead_timers.c"
#include "SKX_IMC_BusDeviceFunctionOffset.h"
#include "MSR_defs.h"
// ===========================================================================================================================================================================
// Convert PCI(bus:device.function,offset) to uint32_t array index
uint32_t PCI_cfg_index(unsigned int Bus, unsigned int Device, unsigned int Function, unsigned int Offset)
{
uint32_t byteaddress;
uint32_t index;
// Bus/Device/Function/Offset are unsigned, so only the upper bounds need checking
assert (Device < (1<<5));
assert (Function < (1<<3));
assert (Offset < (1<<12));
byteaddress = (Bus<<20) | (Device<<15) | (Function<<12) | Offset;
index = byteaddress / 4;
return ( index );
}
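// Example (hypothetical B:D.F chosen only for illustration): Bus 0x17,
// Device 0x0a, Function 0x2, Offset 0xd8 -> byteaddress = 0x17520d8,
// index = 0x5d4836, so the register is read as mmconfig_ptr[0x5d4836].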
// ===========================================================================================================================================================================
int main(int argc, char *argv[])
{
// local declarations
// int cpuid_return[4];
int i;
int retries;
int zeros;
int rc;
int core_pmc_width, fixed_pmc_width; // these will be looked up using CPUID to use in overflow/wraparound correction
int uncore_pmc_width=48; // all the uncore stuff is model-dependent, but most are 48 bits
ssize_t rc64;
char description[100];
size_t len;
long arraylen;
long l2_contained_size, inner_repetitions;
unsigned long pagemapentry;
unsigned long paddr, basephysaddr;
unsigned long pagenum, basepagenum;
uint32_t bus, device, function, offset, ctl_offset, ctr_offset, value, index;
uint32_t socket, imc, channel, counter, controller;
long count,delta;
long j,k,page_number,page_base_index,line_number;
long jstart[CORES_USED], jend[CORES_USED], mycore, vl[CORES_USED];
uint32_t low_0, high_0, low_1, high_1;
char filename[100];
int pkg, tile;
int nr_cpus;
uint64_t msr_val, msr_num;
int mem_fd;
int msr_fd[2]; // one for each socket
int proc_in_pkg[2]; // one Logical Processor number for each socket
uid_t my_uid;
gid_t my_gid;
double sum,expected;
double t0, t1;
double avg_cycles;
unsigned long tsc_start, tsc_end;
float TSC_GHz;
double sf_evict_rate;
double bandwidth;
unsigned long mmconfig_base=0x80000000; // DOUBLE-CHECK THIS ON NEW SYSTEMS!!!!! grep MMCONFIG /proc/iomem | awk -F- '{print $1}'
unsigned long mmconfig_size=0x10000000;
double private_sum,partial_sums[CORES_USED];
long iters,iteration_counts[CORES_USED];
long BaseOffset;
TSC_GHz = get_TSC_frequency()/1.0e9;
core_pmc_width = get_core_counter_width();
fixed_pmc_width = get_fixed_counter_width();
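// Sketch of the wraparound correction these widths feed (the standard formula,
// not copied from the correction code later in this file): for a W-bit
// up-counter, delta = (after - before) & ((1UL << W) - 1);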
BaseOffset = 0;
#ifdef RANDOMOFFSETS
if (argc != 2) {
printf("Must Provide a Random Offset cache line offset value (an integer between 0 and 2^24-375000 (16,402,216))\n");
exit(1);
} else {
BaseOffset = atol(argv[1]);
printf("Random Cache Line Offset is %ld\n",BaseOffset);
BaseOffset = BaseOffset*8;
printf("Starting index for summation is %ld\n",BaseOffset);
}
#endif
retries = 0;
zeros = 0;
report = 1;
dumpall = 0;
nwraps = 0;
l2_contained_size = 125000 * CORES_USED; // about 95% of the L2 space in the cores used
for (i=0; i<CORES_USED; i++) {
iters = 0;
jstart[i] = BaseOffset + i*l2_contained_size/CORES_USED;
jend[i] = jstart[i] + l2_contained_size/CORES_USED;
vl[i] = jend[i]-jstart[i];
printf("thread %d jstart %ld jend %ld vl %ld\n",i,jstart[i],jend[i],vl[i]);
partial_sums[i] = 0.0;
iteration_counts[i] = 0;
for (counter=0; counter<4; counter++) {
core_counters[i][counter][0] = SPECIAL_VALUE;
core_counters[i][counter][1] = SPECIAL_VALUE;
fixed_counters[i][counter][0] = SPECIAL_VALUE;
fixed_counters[i][counter][1] = SPECIAL_VALUE;
}
}
// initialize the array that will hold the L3 numbers for each cache line for each of the first PAGES_MAPPED 2MiB pages
for (i=0; i<PAGES_MAPPED; i++) {
for (line_number=0; line_number<32768; line_number++) {
cha_by_page[i][line_number] = -1; // special value -- if set properly, all values should land in the range 0..NUM_CHA_USED-1
}
}
// allocate the working array on huge pages -- either 1GiB or 2MiB
len = NUMPAGES * MYPAGESIZE;
#if defined MYHUGEPAGE_1GB
array = (double*) mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );
#elif defined MYHUGEPAGE_THP
//array = (double*) mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0 );
rc = posix_memalign((void **)&array, (size_t) 2097152, (size_t) len);
if (rc != 0) {
printf("ERROR: posix_memalign call failed with error code %d\n",rc);
exit(3);
}
#else
array = (double*) mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );
#endif
if (array == (void *)(-1)) {
perror("ERROR: mmap of array a failed! ");
exit(1);
}
// initialize working array
arraylen = NUMPAGES * MYPAGESIZE/sizeof(double);
#pragma omp parallel for
for (j=0; j<arraylen; j++) {
array[j] = 1.0;
}
// initialize page_pointers to point to the beginning of each page in the array
// then get and print physical addresses for each
#ifdef VERBOSE
printf(" Page ArrayIndex VirtAddr PagemapEntry PFN PhysAddr\n");
#endif
for (j=0; j<NUMPAGES; j++) {
k = j*MYPAGESIZE/sizeof(double);
page_pointers[j] = &array[k];
pagemapentry = get_pagemap_entry(&array[k]);
pageframenumber[j] = (pagemapentry & (unsigned long) 0x007FFFFFFFFFFFFF);
#ifdef VERBOSE
printf(" %.5ld %.10ld %#18lx %#18lx %#18lx %#18lx\n",j,k,&array[k],pagemapentry,pageframenumber[j],(pageframenumber[j]<<12));
#endif
}
printf("PAGE_ADDRESSES ");
for (j=0; j<PAGES_MAPPED; j++) {
basephysaddr = pageframenumber[j] << 12;
paddr_by_page[j] = basephysaddr;
printf("0x%.12lx ",paddr_by_page[j]);
}
printf("\n");
// initialize arrays for counter data
for (socket=0; socket<NUM_SOCKETS; socket++) {
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
imc_counts[socket][channel][counter][0] = 0;
imc_counts[socket][channel][counter][1] = 0;
}
}
for (tile=0; tile<NUM_CHA_USED; tile++) {
lines_by_cha[tile] = 0;
for (counter=0; counter<4; counter++) {
cha_counts[socket][tile][counter][0] = 0;
cha_counts[socket][tile][counter][1] = 0;
}
}
}
// get the host name, assume that it is of the TACC standard form, and use this as part
// of the log file name.... Standard form is "c263-109.stampede2.tacc.utexas.edu", so
// truncating at the first "." is done by writing \0 to character #8.
len = 100;
rc = gethostname(description, len);
if (rc != 0) {
fprintf(stderr,"ERROR when trying to get hostname\n");
exit(-1);
}
description[8] = 0; // assume hostname of the form c263-109.stampede2.tacc.utexas.edu -- truncate after first period
my_uid = getuid();
my_gid = getgid();
#ifdef DEBUG
sprintf(filename,"log.%s.perf_counters",description);
// sprintf(filename,"log.perf_counters");
log_file = fopen(filename,"w+");
if (log_file == 0) {
fprintf(stderr,"ERROR %s when trying to open log file %s\n",strerror(errno),filename);
exit(-1);
}
fprintf(log_file,"DEBUG: my uid is %d, my gid is %d\n",my_uid,my_gid);
rc = chown(filename,my_uid,my_gid);
if (rc == 0) {
fprintf(log_file,"DEBUG: Successfully changed ownership of log file to %d %d\n",my_uid,my_gid);
} else {
fprintf(stderr,"ERROR: Attempt to change ownership of log file failed -- bailing out\n");
exit(-1);
}
#endif
//========================================================================================================================
// initial checks
// is this a supported core? (CPUID Family/Model)
// Every processor that I am going to see will be Family 0x06 (no ExtFamily needed).
// The DisplayModel field is (ExtModel<<4)+Model and should be 0x3F for all Xeon E5 v3 systems
int leaf = 1;
int subleaf = 0;
uint32_t eax, ebx, ecx, edx;
__asm__ __volatile__ ("cpuid" : \
"=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (leaf), "c" (subleaf));
// Alternate form:
// The compiler cpuid intrinsics are not documented by Intel -- they use the Microsoft format
// described at https://msdn.microsoft.com/en-us/library/hskdteyh.aspx
// __cpuid(array to hold eax,ebx,ecx,edx outputs, initial eax value)
// __cpuidex(array to hold eax,ebx,ecx,edx outputs, initial eax value, initial ecx value)
// CPUID function 0x01 returns the model info in eax.
// 27:20 ExtFamily -- expect 0x00
// 19:16 ExtModel -- expect 0x3 for HSW, 0x5 for SKX
// 11:8 Family -- expect 0x6
// 7:4 Model -- expect 0xf for HSW, 0x5 for SKX
// __cpuid(&cpuid_return[0], 1);
// uint32_t ModelInfo = cpuid_return[0] & 0x0fff0ff0; // mask out the reserved and "stepping" fields, leaving only the based and extended Family/Model fields
uint32_t ModelInfo = eax & 0x0fff0ff0; // mask out the reserved and "stepping" fields, leaving only the based and extended Family/Model fields
if (ModelInfo != 0x00050650) { // expected values for Skylake Xeon
fprintf(stderr,"ERROR -- this does not appear to be the correct processor type!!!\n");
fprintf(stderr,"ERROR -- Expected CPUID(0x01) Family/Model bits = 0x%x, but found 0x%x\n",0x00050650,ModelInfo);
exit(1);
}
#ifdef IMC_COUNTS
// ===================================================================================================================
// ------------------ REQUIRES ROOT PERMISSIONS ------------------
// open /dev/mem for PCI device access and mmap() a pointer to the beginning
// of the 256 MiB PCI Configuration Space.
// check VID/DID for uncore bus:device:function combinations
// Note that using /dev/mem for PCI configuration space access is required for some devices on KNL.
// It is not required on other systems, but it is not particularly inconvenient either.
sprintf(filename,"/dev/mem");
#ifdef DEBUG
fprintf(log_file,"opening %s\n",filename);
#endif
mem_fd = open(filename, O_RDWR);
if (mem_fd == -1) {
fprintf(stderr,"ERROR %s when trying to open %s\n",strerror(errno),filename);
exit(-1);
}
int map_prot = PROT_READ | PROT_WRITE;
mmconfig_ptr = mmap(NULL, mmconfig_size, map_prot, MAP_SHARED, mem_fd, mmconfig_base);
if (mmconfig_ptr == MAP_FAILED) {
fprintf(stderr,"cannot mmap base of PCI configuration space from /dev/mem: address %lx\n", mmconfig_base);
exit(2);
#ifdef DEBUG
} else {
fprintf(log_file,"Successful mmap of base of PCI configuration space from /dev/mem at address %lx\n", mmconfig_base);
#endif
}
close(mem_fd); // OK to close file after mmap() -- the mapping persists until unmap() or program exit
// New simple test that does not need to know the uncore bus numbers here...
// Skylake bus 0, Function 5, offset 0 -- Sky Lake-E MM/Vt-d Configuration Registers
//
// simple test -- should return "20248086" on Skylake Xeon EP -- DID 0x2024, VID 0x8086
bus = 0x00;
device = 0x5;
function = 0x0;
offset = 0x0;
index = PCI_cfg_index(bus, device, function, offset);
value = mmconfig_ptr[index];
if (value != 0x20248086) {
fprintf(stderr,"ERROR: Bus %x device %x function %x offset %x expected %x, found %x\n",bus,device,function,offset,0x20248086,value);
exit(3);
#ifdef DEBUG
} else {
fprintf(log_file,"DEBUG: Well done! Bus %x device %x function %x offset %x returns expected value of %x\n",bus,device,function,offset,value);
#endif
}
#endif
#ifdef CHA_COUNTS
// ===================================================================================================================
// open the MSR driver using one core in socket 0 and one core in socket 1
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
proc_in_pkg[0] = 0; // logical processor 0 is in socket 0 in all TACC systems
proc_in_pkg[1] = nr_cpus-1; // logical processor N-1 is in socket 1 in all TACC 2-socket systems
for (pkg=0; pkg<2; pkg++) {
sprintf(filename,"/dev/cpu/%d/msr",proc_in_pkg[pkg]);
msr_fd[pkg] = open(filename, O_RDWR);
if (msr_fd[pkg] == -1) {
fprintf(stderr,"ERROR %s when trying to open %s\n",strerror(errno),filename);
exit(-1);
}
}
for (pkg=0; pkg<2; pkg++) {
pread(msr_fd[pkg],&msr_val,sizeof(msr_val),IA32_TIME_STAMP_COUNTER);
fprintf(stdout,"DEBUG: TSC on core %d socket %d is %ld\n",proc_in_pkg[pkg],pkg,msr_val);
}
pread(msr_fd[0],&msr_val,sizeof(msr_val),0x186);
printf("Core PerfEvtSel0 0x%lx\n",msr_val);
pread(msr_fd[0],&msr_val,sizeof(msr_val),0x187);
printf("Core PerfEvtSel1 0x%lx\n",msr_val);
pread(msr_fd[0],&msr_val,sizeof(msr_val),0x188);
printf("Core PerfEvtSel2 0x%lx\n",msr_val);
pread(msr_fd[0],&msr_val,sizeof(msr_val),0x189);
printf("Core PerfEvtSel3 0x%lx\n",msr_val);
// Program the CHA mesh counters
// Each CHA has a block of 16 MSRs reserved, of which 12 are used
// The base for each CHA is 0xE00 + 0x10*CHA
// Within each block:
// Unit Control is at offset 0x00
// CTL0, 1, 2, 3 are at offsets 0x01, 0x02, 0x03, 0x04
// CTR0, 1, 2, 3 are at offsets 0x08, 0x09, 0x0a, 0x0b
// For the moment I think I can ignore the filter registers at offsets 0x05 and 0x06
// and the status register at offset 0x07
// The control register needs bit 22 set to enabled, then bits 15:8 as Umask and 7:0 as EventSelect
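// e.g. 0x00400cab decodes as enable (bit 22) | Umask 0x0c | EventSelect 0xab,
// and CHA 5's CTL2 lives at MSR 0xE00 + 0x10*5 + 3 = 0xE53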
// Mesh Events:
// HORZ_RING_BL_IN_USE = 0xab
// LEFT_EVEN = 0x01
// LEFT_ODD = 0x02
// RIGHT_EVEN = 0x04
// RIGHT_ODD = 0x08
// VERT_RING_BL_IN_USE = 0xaa
// UP_EVEN = 0x01
// UP_ODD = 0x02
// DN_EVEN = 0x04
// DN_ODD = 0x08
// For starters, I will combine even and odd and create 4 events
// 0x004003ab HORZ_RING_BL_IN_USE.LEFT
// 0x00400cab HORZ_RING_BL_IN_USE.RIGHT
// 0x004003aa VERT_RING_BL_IN_USE.UP
// 0x00400caa VERT_RING_BL_IN_USE.DN
// first set to try....
cha_perfevtsel[0] = 0x004003ab; // HORZ_RING_BL_IN_USE.LEFT
cha_perfevtsel[1] = 0x00400cab; // HORZ_RING_BL_IN_USE.RIGHT
cha_perfevtsel[2] = 0x004003aa; // VERT_RING_BL_IN_USE.UP
cha_perfevtsel[3] = 0x00400caa; // VERT_RING_BL_IN_USE.DN
// second set to try....
// cha_perfevtsel[0] = 0x004001ab; // HORZ_RING_BL_IN_USE.LEFT_EVEN
// cha_perfevtsel[1] = 0x004002ab; // HORZ_RING_BL_IN_USE.LEFT_ODD
// cha_perfevtsel[2] = 0x004004ab; // HORZ_RING_BL_IN_USE.RIGHT_EVEN
// cha_perfevtsel[3] = 0x004008ab; // HORZ_RING_BL_IN_USE.RIGHT_ODD
// Snoop Filter Eviction counters
cha_perfevtsel[0] = 0x0040073d; // SF_EVICTION S,E,M states
cha_perfevtsel[1] = 0x00400334; // LLC_LOOKUP.DATA_READ <-- requires CHA_FILTER0[26:17]
cha_perfevtsel[2] = 0x00400534; // LLC_LOOKUP.DATA_WRITE (WB from L2) <-- requires CHA_FILTER0[26:17]
cha_perfevtsel[3] = 0x0040af37; // LLC_VICTIMS.TOTAL (MESF) (does not count clean victims)
uint64_t cha_filter0 = 0x01e20000; // set bits 24,23,22,21,17 FMESI -- all LLC lookups, no SF lookups
printf("CHA PerfEvtSel0 0x%lx\n",cha_perfevtsel[0]);
printf("CHA PerfEvtSel1 0x%lx\n",cha_perfevtsel[1]);
printf("CHA PerfEvtSel2 0x%lx\n",cha_perfevtsel[2]);
printf("CHA PerfEvtSel3 0x%lx\n",cha_perfevtsel[3]);
printf("CHA FILTER0 0x%lx\n",cha_filter0);
#ifdef VERBOSE
printf("VERBOSE: programming CHA counters\n");
#endif
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
msr_num = 0xe00 + 0x10*tile; // box control register -- set enable bit
msr_val = 0x00400000;
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 1; // ctl0
msr_val = cha_perfevtsel[0];
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 2; // ctl1
msr_val = cha_perfevtsel[1];
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 3; // ctl2
msr_val = cha_perfevtsel[2];
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 4; // ctl3
msr_val = cha_perfevtsel[3];
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 5; // filter0
msr_val = cha_filter0; // bits 24:21,17 FMESI -- all LLC lookups, no SF lookups
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
}
}
#ifdef VERBOSE
printf("VERBOSE: finished programming CHA counters\n");
#endif
#endif
#ifdef IMC_COUNTS
// ===================================================================================================================
// Read the current programming of the IMC counters and look for the standard values (in this order)
// CAS_COUNT.READS Event 0x04, Umask 0x03
// CAS_COUNT.WRITES Event 0x04, Umask 0x0C
// ACT.ALL Event 0x01, Umask 0x0B
// PRE_COUNT.MISS Event 0x02, Umask 0x01
// DCLK
#ifdef VERBOSE
printf("Preparing to program IMC counters\n");
#endif
// expected values of IMC performance counter event select control registers
imc_perfevtsel[0] = 0x00400304; // CAS_COUNT.READS
imc_perfevtsel[1] = 0x00400C04; // CAS_COUNT.WRITES
imc_perfevtsel[2] = 0x00400B01; // ACT_COUNT.ALL
imc_perfevtsel[3] = 0x00400102; // PRE_COUNT.MISS
imc_perfevtsel[4] = 0x00400000; // DCLK
imc_vid_did[0] = 0x20428086; // all channel 0 devices are 2042
imc_vid_did[1] = 0x20468086; // all channel 1 devices are 2046
imc_vid_did[2] = 0x204a8086; // all channel 2 devices are 204a
printf("IMC PerfEvtSel0 0x%lx\n",imc_perfevtsel[0]);
printf("IMC PerfEvtSel1 0x%lx\n",imc_perfevtsel[1]);
printf("IMC PerfEvtSel2 0x%lx\n",imc_perfevtsel[2]);
printf("IMC PerfEvtSel3 0x%lx\n",imc_perfevtsel[3]);
printf("IMC PerfEvtSel4 0x%lx\n",imc_perfevtsel[4]);
// print the full wall-clock time in seconds and microseconds
// assume both components of tp struct are longs.
fprintf(stdout,"# %s\n", rcsid);
i = gettimeofday(&tp,&tzp);
fprintf(stdout,"%ld %ld\n", tp.tv_sec,tp.tv_usec);
for (socket=0; socket<NUM_SOCKETS; socket++) {
bus = IMC_BUS_Socket[socket];
#ifdef VERBOSE
printf("VERBOSE: socket %d bus %d\n",socket,bus);
#endif
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
device = IMC_Device_Channel[channel];
function = IMC_Function_Channel[channel];
#ifdef VERBOSE
printf("VERBOSE: channel %d device %d function %d\n",channel, device, function);
#endif
// check to make sure this is the correct device
offset = 0x0;
index = PCI_cfg_index(bus, device, function, offset);
value = mmconfig_ptr[index];
if ( value != imc_vid_did[channel%3]) {
fprintf(stderr,"WARNING!!!! socket %d, channel %d has vid_did %x but should be %x\n",socket,channel,value,imc_vid_did[channel%3]);
}
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
// check to see if this unit is programmed correctly and reprogram if needed
offset = IMC_PmonCtl_Offset[counter];
index = PCI_cfg_index(bus, device, function, offset);
value = mmconfig_ptr[index];
if ( value != imc_perfevtsel[counter]) {
fprintf(stderr,"WARNING!!!! socket %d, channel %d has perfevtsel %x but should be %x -- reprogramming\n",socket,channel,value,imc_perfevtsel[counter]);
mmconfig_ptr[index] = imc_perfevtsel[counter];
}
}
}
}
#endif
// ========= END OF PERFORMANCE COUNTER SETUP ========================================================================
#ifdef MAP_L3
// ============== BEGIN L3 MAPPING TESTS ==============================
// For each of the PAGES_MAPPED 2MiB pages:
// 1. Use "access()" to see if the mapping file already exists.
// If exists:
// 2. Use "stat()" to make sure the file is the correct size
// If right size:
// 3. Read the contents into the 32768-element int8_t array of L3 numbers.
// Else (wrong size):
// 4. Abort and tell the user to fix it manually.
// Else (not exists):
// 4. Call the mapping function to re-compute the map
// 5. Create mapping file
// 6. Save data in mapping file
// 7. Close output file
FILE *ptr_mapping_file;
int needs_mapping;
int good, good_old, good_new, pass1, pass2, pass3, found, numtries;
int min_count, max_count, sum_count, old_cha;
double avg_count, goodness1, goodness2, goodness3;
int globalsum = 0;
long totaltries = 0;
int NFLUSHES = 1000;
for (page_number=0; page_number<PAGES_MAPPED; page_number++) {
needs_mapping=0;
sprintf(filename,"PADDR_0x%.12lx.map",paddr_by_page[page_number]);
i = access(filename, F_OK);
if (i == -1) { // file does not exist
printf("DEBUG: Mapping file %s does not exist -- will create file after mapping cache lines\n",filename);
needs_mapping = 1;
} else { // file exists
i = access(filename, R_OK);
if (i == -1) { // file exists without read permissions
printf("ERROR: Mapping file %s exists, but without read permission\n",filename);
exit(1);
} else { // file exists with read permissions
ptr_mapping_file = fopen(filename,"r");
if (!ptr_mapping_file) {
printf("ERROR: Failed to open Mapping File %s, should not happen\n",filename);
exit(2);
}
k = fread(&cha_by_page[page_number][0],(size_t) 32768,(size_t) 1,ptr_mapping_file);
if (k != 1) { // incorrect read length
printf("ERROR: Read from Mapping File %s, returned the wrong record count %ld expected 1\n",filename,k);
exit(3);
} else { // correct read length
printf("DEBUG: Mapping File read for %s succeeded -- skipping mapping for this page\n",filename);
needs_mapping = 0;
}
}
}
if (needs_mapping == 1) {
// code imported from SystemMirrors/Hikari/MemSuite/InterventionLatency/L3_mapping.c
#ifdef VERBOSE
printf("DEBUG: here I need to perform the mapping for paddr 0x%.12lx, and then save the file\n",paddr_by_page[page_number]);
#endif
page_base_index = page_number*262144; // index of element at beginning of current 2MiB page
for (line_number=0; line_number<32768; line_number++) {
good = 0;
good_old = 0;
good_new = 0;
numtries = 0;
#ifdef VERBOSE
if (line_number%64 == 0) {
pagemapentry = get_pagemap_entry(&array[page_base_index+line_number*8]);
printf("DEBUG: page_base_index %ld line_number %ld index %ld pagemapentry 0x%lx\n",page_base_index,line_number,page_base_index+line_number*8,pagemapentry);
}
#endif
do { // -------------- Inner Repeat Loop until results pass "goodness" tests --------------
numtries++;
if (numtries > 100) {
printf("ERROR: No good results for line %d after %d tries\n",line_number,numtries);
exit(101);
}
totaltries++;
// 1. read L3 counters before starting test
for (tile=0; tile<NUM_CHA_USED; tile++) {
msr_num = 0xe00 + 0x10*tile + 0x8 + 1; // counter 1 is the LLC_LOOKUPS.READ event
pread(msr_fd[0],&msr_val,sizeof(msr_val),msr_num);
cha_counts[0][tile][1][0] = msr_val; // use the array I have already declared for cha counts
// printf("DEBUG: page %ld line %ld msr_num 0x%x msr_val %ld cha_counter1 %lu\n",
// page_number,line_number,msr_num,msr_val,cha_counts[0][tile][1][0]);
}
// 2. Access the line many times
sum = 0;
for (i=0; i<NFLUSHES; i++) {
sum += array[page_base_index+line_number*8];
_mm_mfence();
_mm_clflush(&array[page_base_index+line_number*8]);
_mm_mfence();
}
globalsum += sum;
// 3. read L3 counters after loads are done
for (tile=0; tile<NUM_CHA_USED; tile++) {
msr_num = 0xe00 + 0x10*tile + 0x8 + 1; // counter 1 is the LLC_LOOKUPS.READ event
pread(msr_fd[0],&msr_val,sizeof(msr_val),msr_num);
cha_counts[0][tile][1][1] = msr_val; // use the array I have already declared for cha counts
}
#ifdef VERBOSE
for (tile=0; tile<NUM_CHA_USED; tile++) {
printf("DEBUG: page %ld line %ld cha_counter1_after %lu cha_counter1 before %lu delta %lu\n",
page_number,line_number,cha_counts[0][tile][1][1],cha_counts[0][tile][1][0],cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]);
}
#endif
// CHA counter 1 set to LLC_LOOKUP.READ
//
// 4. Determine which L3 slice owns the cache line and
// 5. Save the CHA number in the cha_by_page[page][line] array
// first do a rough quantitative checks of the "goodness" of the data
// goodness1 = max/NFLUSHES (pass if >95%)
// goodness2 = min/NFLUSHES (pass if <20%)
// goodness3 = avg/NFLUSHES (pass if <40%)
max_count = 0;
min_count = 1<<30;
sum_count = 0;
for (tile=0; tile<NUM_CHA_USED; tile++) {
max_count = MAX(max_count, cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]);
min_count = MIN(min_count, cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]);
sum_count += cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0];
}
avg_count = (double)(sum_count - max_count) / (double)(NUM_CHA_USED);
goodness1 = (double) max_count / (double) NFLUSHES;
goodness2 = (double) min_count / (double) NFLUSHES;
goodness3 = avg_count / (double) NFLUSHES;
// compare the goodness parameters with manually chosen limits & combine into a single pass (good=1) or fail (good=0)
pass1 = 0;
pass2 = 0;
pass3 = 0;
if ( goodness1 > 0.95 ) pass1 = 1;
if ( goodness2 < 0.20 ) pass2 = 1;
if ( goodness3 < 0.40 ) pass3 = 1;
good_new = pass1 * pass2 * pass3;
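// multiplying the 0/1 pass flags is a branch-free logical AND of the three checks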
#ifdef VERBOSE
printf("GOODNESS: line_number %ld max_count %d min_count %d sum_count %d avg_count %f goodness1 %f goodness2 %f goodness3 %f pass123 %d %d %d\n",
line_number, max_count, min_count, sum_count, avg_count, goodness1, goodness2, goodness3, pass1, pass2, pass3);
if (good_new == 0) printf("DEBUG: one or more of the sanity checks failed for line=%ld: %d %d %d goodness values %f %f %f\n",
line_number,pass1,pass2,pass3,goodness1,goodness2,goodness3);
#endif
// test to see if more than one CHA reports > 0.95*NFLUSHES events
found = 0;
old_cha = -1;
int min_counts = (NFLUSHES*19)/20;
for (tile=0; tile<NUM_CHA_USED; tile++) {
if (cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0] >= min_counts) {
old_cha = cha_by_page[page_number][line_number];
cha_by_page[page_number][line_number] = tile;
found++;
#ifdef VERBOSE
if (found > 1) {
printf("WARNING: Multiple (%d) CHAs found using counter 1 for cache line %ld, index %ld: old_cha %d new_cha %d\n",found,line_number,page_base_index+line_number*8,old_cha,cha_by_page[page_number][line_number]);
}
#endif
}
}
if (found == 0) {
good_old = 0;
#ifdef VERBOSE
printf("WARNING: no CHA entry has been found for line %ld!\n",line_number);
printf("DEBUG dump for no CHA found\n");
for (tile=0; tile<NUM_CHA_USED; tile++) {
printf("CHA %d LLC_LOOKUP.READ delta %ld\n",tile,(cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]));
}
#endif
} else if (found == 1) {
good_old = 1;
} else {
good_old = 0;
#ifdef VERBOSE
printf("DEBUG dump for multiple CHAs found\n");
for (tile=0; tile<NUM_CHA_USED; tile++) {
printf("CHA %d LLC_LOOKUP.READ delta %ld\n",tile,(cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]));
}
#endif
}
good = good_new * good_old; // trigger a repeat if either the old or new tests failed
}
while (good == 0);
#if 0
// 6. save the cache line number in the appropriate the cbo_indices[cbo][#lines] array
// 7. increment the corresponding cbo_num_lines[cbo] array entry
this_cbo = cha_by_page[page_number][line_number];
if (this_cbo == -1) {
printf("ERROR: cha_by_page[%ld][%ld] has not been set!\n",page_number,line_number);
exit(80);
}
cbo_indices[this_cbo][cbo_num_lines[this_cbo]] = line_number;
cbo_num_lines[this_cbo]++;
#endif
}
// I have not overwritten the filename, but I will rebuild it here just in case I add something stupid in between....
sprintf(filename,"PADDR_0x%.12lx.map",paddr_by_page[page_number]);
ptr_mapping_file = fopen(filename,"w");
if (!ptr_mapping_file) {
printf("ERROR: Failed to open Mapping File %s for writing -- aborting\n",filename);
exit(4);
}
// first try -- write one record of 32768 bytes
rc64 = fwrite(&cha_by_page[page_number][0],(size_t) 32768, (size_t) 1, ptr_mapping_file);
if (rc64 != 1) {
printf("ERROR: failed to write one 32768 Byte record to %s -- return code %ld\n",filename,rc64);
exit(5);
} else {
printf("SUCCESS: wrote mapping file %s\n",filename);
}
fclose(ptr_mapping_file); // flush and close so the mapping file is usable by later runs
}
}
printf("DUMMY: globalsum %d\n",globalsum);
printf("VERBOSE: L3 Mapping Complete in %ld tries for %d cache lines ratio %f\n",totaltries,32768*PAGES_MAPPED,(double)totaltries/(double)(32768*PAGES_MAPPED));
#ifndef MYHUGEPAGE_1GB
// TODO!! Fix this so that it is not hard-coded for the 24-core case!!!!
//
// now that the mapping is complete, I can add up the number of lines mapped to each CHA
// be careful to count only the lines that are used, not the full 24MiB
// 3 million elements is ~11.44 2MiB pages, so count all lines in each of the first 11 pages
// If I did the arithmetic correctly, the 3 million elements uses 931328 Bytes of the 12th 2MiB page
// which is 116416 elements or 14552 cache lines.
// first accumulate the first 11 full pages
for (page_number=0; page_number<11; page_number++) {
for (line_number=0; line_number<32768; line_number++) {
lines_by_cha[cha_by_page[page_number][line_number]]++;
}
}
// then accumulate the partial 12th page
for (line_number=0; line_number<14552; line_number++) {
lines_by_cha[cha_by_page[11][line_number]]++;
}
// output
long lines_accounted = 0;
printf("LINES_BY_CHA");
for (i=0; i<NUM_CHA_USED; i++) {
printf(" %ld",lines_by_cha[i]);
lines_accounted += lines_by_cha[i];
}
printf("\n");
printf("ACCCOUNTED FOR %ld lines expected %ld lines\n",lines_accounted,l2_contained_size/8);
#endif
// ============== END L3 MAPPING TESTS ==============================
#endif
// For the snoop filter tests, I want to repeatedly read
// some number of arrays per core with an aggregate footprint
// close to 1MiB per core
// 24 cores = 24 MiB = 3 Mi (3,145,728) doubles, so
// an array length of 3 million elements is just about right (95.3674% of 3 Mi)
// l2_contained_size = arraylen; // only use if I want a large memory-contained version
inner_repetitions = 1000;
int stride = 1; // used in thread binding checks: use 2 for Dell nodes, 1 for Intel nodes
// try to pre-load the working data into the L2 caches before the initial performance counter reads
sum = 0.0;
#pragma omp parallel for reduction(+:sum)
for (i=0; i<l2_contained_size; i++) sum += array[i];
// While I am at it, I need to warm up the cores using AVX-512 code to get them to full frequency
// This may take up to 100 microseconds, or maybe 400,000 AVX512 instructions per thread.
// This is a pain because I can't trust the compiler to generate AVX512 code at any given time,
// so I have to resort to inline assembly.
tsc_start = rdtsc();
#pragma omp parallel for
for (i=0; i<CORES_USED; i++) {
for (j=0; j<10*1000*1000; j++) {
__asm__ __volatile__ (
"vpaddq %%zmm0, %%zmm1, %%zmm2\n\t"
"vpaddq %%zmm1, %%zmm2, %%zmm3\n\t"
"vpaddq %%zmm2, %%zmm3, %%zmm0\n\t"
"vpaddq %%zmm3, %%zmm0, %%zmm1"
: : : "zmm0","zmm1","zmm2","zmm3");
}
}
tsc_end = rdtsc();
printf("DEBUG: WARMUP LOOP took %lu TSC cycles\n",tsc_end - tsc_start);
// =================== BEGINNING OF PERFORMANCE COUNTER READS BEFORE KERNEL TESTING ==============================
#ifdef IMC_COUNTS
// ------------ read the initial values of the IMC counters ------------
for (socket=0; socket<NUM_SOCKETS; socket++) {
bus = IMC_BUS_Socket[socket];
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
device = IMC_Device_Channel[channel];
function = IMC_Function_Channel[channel];
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
offset = IMC_PmonCtr_Offset[counter];
index = PCI_cfg_index(bus, device, function, offset);
// read each counter twice to identify rare cases where the low-order bits
// overflow and increment the high-order bits between the two reads.
// Use the second set of values unless (( high_1 != high_0 ) && ( low_1 > low_0))
// (this indicates that the counter rolled between the 3rd and 4th reads).
low_0 = mmconfig_ptr[index];
high_0 = mmconfig_ptr[index+1];
low_1 = mmconfig_ptr[index];
high_1 = mmconfig_ptr[index+1];
if ( (high_1 != high_0) && (low_1 > low_0) ) {
count = ((uint64_t) high_0) << 32 | (uint64_t) low_0;
} else {
count = ((uint64_t) high_1) << 32 | (uint64_t) low_1;
}
imc_counts[socket][channel][counter][0] = count;
}
}
}
#if 0
// for debugging only: print initial values of IMC counts
for (socket=0; socket<NUM_SOCKETS; socket++) {
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
fprintf(stdout,"%d %d",socket,channel);
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
fprintf(stdout," %ld",imc_counts[socket][channel][counter][0]);
}
fprintf(stdout,"\n");
}
}
#endif
#endif
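// Editor's note: the paired 32-bit reads above implement a wrap-safe 64-bit
// MMIO counter read. A hypothetical helper capturing the idiom (sketch only;
// the name and pointer type are illustrative, following the mmconfig_ptr/index
// convention used above):
#if 0
static uint64_t read_mmio_counter64(volatile uint32_t *mmio, uint32_t index)
{
    uint32_t low_0  = mmio[index];      // 1st read
    uint32_t high_0 = mmio[index+1];    // 2nd read
    uint32_t low_1  = mmio[index];      // 3rd read
    uint32_t high_1 = mmio[index+1];    // 4th read
    // If the high word changed while the low word increased, the low word
    // wrapped between the 3rd and 4th reads, so (high_1,low_1) would pair a
    // post-wrap high with a pre-wrap low -- use the first sample instead.
    if ((high_1 != high_0) && (low_1 > low_0))
        return ((uint64_t) high_0) << 32 | (uint64_t) low_0;
    return ((uint64_t) high_1) << 32 | (uint64_t) low_1;
}
#endif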
#ifdef CHA_COUNTS
// ------------ read the initial values of the CHA mesh counters ------------
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
for (counter=0; counter<4; counter++) {
msr_num = 0xe00 + 0x10*tile + 0x8 + counter;
pread(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
cha_counts[pkg][tile][counter][0] = msr_val;
}
}
}
#if 0
// for debugging only: print initial values of CHA counters
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
for (counter=0; counter<4; counter++) {
printf("Package %d, tile %d, counter %d, value %lu\n",pkg,tile,counter,cha_counts[pkg][tile][counter][0]);
}
}
}
#endif
#endif
// ------------ read the initial values of the programmable core counters ------------
#pragma omp parallel for private(counter)
for (i=0; i<CORES_USED; i++) {
#ifdef CHECK_THREAD_LOCATION
if (get_core_number() != stride*i) {
printf("ERROR: thread %d is in the wrong place %d\n",i,get_core_number());
}
#endif
for (counter=0; counter<4; counter++) {
core_counters[i][counter][0] = rdpmc(counter);
}
}
tsc_start = rdtsc();
// ================= CODE UNDER TEST BEGINS HERE ====================
#ifdef SIMPLE_OMP_LOOP
sum = 0.0;
for (k=0; k<inner_repetitions; k++) {
#pragma omp parallel for reduction(+:sum)
for (j=0; j<l2_contained_size; j++) {
sum += array[j];
}
}
#else
#pragma omp parallel for private(j,k,iters,private_sum)
for (i=0; i<CORES_USED; i++) {
iters = 0;
fixed_counters[i][0][0] = rdpmc_instructions();
fixed_counters[i][1][0] = rdpmc_actual_cycles();
fixed_counters[i][2][0] = rdpmc_reference_cycles();
fixed_counters[i][3][0] = rdtsc();
for (k=0; k<inner_repetitions; k++) {
private_sum = ssum(&array[jstart[i]],vl[i]);
partial_sums[i] += private_sum;
iters++;
}
fixed_counters[i][0][1] = rdpmc_instructions();
fixed_counters[i][1][1] = rdpmc_actual_cycles();
fixed_counters[i][2][1] = rdpmc_reference_cycles();
fixed_counters[i][3][1] = rdtsc();
iteration_counts[i] = iters;
}
#endif
// ================ CODE UNDER TEST ENDS HERE ====================
tsc_end = rdtsc();
// use the partial sums so the optimizer will not eliminate code
for (i=0; i<CORES_USED; i++) {
sum += partial_sums[i];
}
// -------------------- read the final values of the Programmable Core counters ------------------------
#pragma omp parallel for private(counter)
for (i=0; i<CORES_USED; i++) {
#ifdef CHECK_THREAD_LOCATION
if (get_core_number() != stride*i) {
printf("ERROR: thread %d is in the wrong place %d\n",i,get_core_number());
}
#endif
for (counter=0; counter<4; counter++) {
core_counters[i][counter][1] = rdpmc(counter);
#ifdef RETRIES
// if the counter returns zero, read it one more time....
if (core_counters[i][counter][1] == SPECIAL_VALUE) {
core_counters[i][counter][1] = rdpmc(counter);
#pragma omp atomic update
retries++;
}
#endif
}
}
#ifdef CHECK_SPECIAL_VALUES
for (i=0; i<CORES_USED; i++) {
for (counter=0; counter<4; counter++) {
if (core_counters[i][counter][0] == SPECIAL_VALUE) {
printf("DEBUG: SPECIAL_VALUE found after loop in start count on thread %d counter %d\n",i,counter);
zeros++;
}
if (core_counters[i][counter][1] == SPECIAL_VALUE) {
printf("DEBUG: SPECIAL_VALUE found after loop in end count on thread %d counter %d\n",i,counter);
zeros++;
}
}
}
#endif
#ifdef CHA_COUNTS
// ------------------- read the final values of the CHA mesh counters ----------------
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
for (counter=0; counter<4; counter++) {
msr_num = 0xe00 + 0x10*tile + 0x8 + counter;
pread(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
cha_counts[pkg][tile][counter][1] = msr_val;
}
}
}
#endif
#ifdef IMC_COUNTS
// ------------------ read the final values of the IMC counters -----------------
for (socket=0; socket<NUM_SOCKETS; socket++) {
bus = IMC_BUS_Socket[socket];
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
device = IMC_Device_Channel[channel];
function = IMC_Function_Channel[channel];
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
offset = IMC_PmonCtr_Offset[counter];
index = PCI_cfg_index(bus, device, function, offset);
// read each counter twice to identify rare cases where the low-order bits
// overflow and increment the high-order bits between the two reads.
// Use the second set of values unless (( high_1 != high_0 ) && ( low_1 > low_0))
// (this indicates that the counter rolled between the 3rd and 4th reads).
low_0 = mmconfig_ptr[index];
high_0 = mmconfig_ptr[index+1];
low_1 = mmconfig_ptr[index];
high_1 = mmconfig_ptr[index+1];
if ( (high_1 != high_0) && (low_1 > low_0) ) {
count = ((uint64_t) high_0) << 32 | (uint64_t) low_0;
} else {
count = ((uint64_t) high_1) << 32 | (uint64_t) low_1;
}
imc_counts[socket][channel][counter][1] = count;
}
}
}
#endif
// ================================== END OF PERFORMANCE COUNTER READS AFTER TEST ==============================================
t0 = 0.0;
t1 = (double) (tsc_end - tsc_start) / TSC_GHz / 1.0e9;
printf("Instrumented code required %f seconds to execute\n",t1-t0);
bandwidth = sizeof(double)*(double)l2_contained_size*(double)inner_repetitions / (t1-t0) / 1e9;
printf("Bandwidth %f GB/s\n",bandwidth);
printf("Bandwidth per core %f GB/s\n",bandwidth/(double)CORES_USED);
printf("Approx Bytes/cycle per core %f\n",bandwidth/(double)CORES_USED/2.0);
expected = (double)l2_contained_size * (double)(inner_repetitions) / (double)CORES_USED;
avg_cycles = (double)(tsc_end - tsc_start) / expected;
printf("Average TSC cycles per element %f\n",avg_cycles);
// clear the arrays for the package-level sums
for (pkg=0; pkg<2; pkg++) {
for (counter=0; counter<4; counter++) { // no point in summing the cycle counts, so exclude counter 4
core_pkg_sums[pkg][counter] = 0;
fixed_pkg_sums[pkg][counter] = 0;
imc_pkg_sums[pkg][counter] = 0;
cha_pkg_sums[pkg][counter] = 0;
}
}
// compute core package sums and optional print
for (i=0; i<CORES_USED; i++) {
for (counter=0; counter<4; counter++) {
delta = corrected_pmc_delta(fixed_counters[i][counter][1],fixed_counters[i][counter][0],fixed_pmc_width);
fixed_pkg_sums[0][counter] += delta;
}
for (counter=0; counter<4; counter++) {
#ifdef CHECK_SPECIAL_VALUES
if (core_counters[i][counter][0] == SPECIAL_VALUE) {
printf("DEBUG: SPECIAL_VALUE found in post-processing in start count on thread %d counter %d\n",i,counter);
}
if (core_counters[i][counter][1] == SPECIAL_VALUE) {
printf("DEBUG: SPECIAL_VALUE found in post-processing in end count on thread %d counter %d\n",i,counter);
}
#endif
delta = corrected_pmc_delta(core_counters[i][counter][1],core_counters[i][counter][0],core_pmc_width);
#ifdef VERBOSE
printf("CORE %d counter %d end %ld start %ld delta %ld\n",i,counter,core_counters[i][counter][1],core_counters[i][counter][0],delta);
#endif
core_pkg_sums[0][counter] += delta;
}
}
if (dumpall == 1) {
report = 0;
for (i=0; i<CORES_USED; i++) {
for (counter=0; counter<4; counter++) {
delta = corrected_pmc_delta(core_counters[i][counter][1],core_counters[i][counter][0],core_pmc_width);
printf("CORE %d counter %d end %ld start %ld delta %ld\n",i,counter,core_counters[i][counter][1],core_counters[i][counter][0],delta);
}
}
}
report = 1;
dumpall = 0;
#ifdef CHA_COUNTS
// print out the differences and compute sums of differences
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
for (counter=0; counter<4; counter++) {
delta = corrected_pmc_delta(cha_counts[pkg][tile][counter][1],cha_counts[pkg][tile][counter][0],uncore_pmc_width);
#ifdef VERBOSE
printf("CHA pkg %d tile %d counter %d delta %ld\n",pkg,tile,counter,delta);
#endif
cha_pkg_sums[pkg][counter] += delta;
}
}
}
#endif
#ifdef IMC_COUNTS
for (pkg=0; pkg<2; pkg++) {
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
delta = corrected_pmc_delta(imc_counts[pkg][channel][counter][1],imc_counts[pkg][channel][counter][0],uncore_pmc_width);
#ifdef VERBOSE
printf("IMC pkg %d channel %d counter %d delta %ld\n",pkg,channel,counter,delta);
#endif
imc_pkg_sums[pkg][counter] += delta;
}
}
}
#endif
int max_display_pkg = 1;
printf("Expected AVX512 arithmetic instructions (Event 0xC7, Umask 0x40) %ld\n",l2_contained_size*inner_repetitions/8);
for (pkg=0; pkg<max_display_pkg; pkg++) {
for (counter=0; counter<4; counter++) {
printf("CORE_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,core_pkg_sums[pkg][counter]);
}
}
for (pkg=0; pkg<max_display_pkg; pkg++) {
for (counter=0; counter<4; counter++) {
printf("FIXED_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,fixed_pkg_sums[pkg][counter]);
}
}
// the fixed-function counters are measured inside the OpenMP loop, so they should not be contaminated by
// spin-waiting....
// Compute per-core metrics here -- note that the fixed-function counter set is (Instr, CoreCyc, RefCyc, TSC)
// Utilization = RefCyc/TSC (fixed2/fixed3)
// AvgGHz_unhalted = CoreCyc/RefCyc * 2.1 (fixed1/fixed2 * 2.1)
// AvgGHz_wall = CoreCyc/TSC * 2.1 (fixed1/fixed3 * 2.1)
// IPC = Instr/CoreCyc (fixed0/fixed1)
long delta_inst, delta_core, delta_ref, delta_tsc;
double utilization, avg_ghz, ipc;
printf("CORE_UTILIZATION ");
for (i=0; i<CORES_USED; i++) {
delta_ref = corrected_pmc_delta(fixed_counters[i][2][1],fixed_counters[i][2][0],fixed_pmc_width);
delta_tsc = corrected_pmc_delta(fixed_counters[i][3][1],fixed_counters[i][3][0],fixed_pmc_width);
utilization = (double)delta_ref / (double)delta_tsc;
printf("%6.4f ",utilization);
}
printf("\n");
printf("CORE_GHZ ");
for (i=0; i<CORES_USED; i++) {
delta_core = corrected_pmc_delta(fixed_counters[i][1][1],fixed_counters[i][1][0],fixed_pmc_width);
delta_ref = corrected_pmc_delta(fixed_counters[i][2][1],fixed_counters[i][2][0],fixed_pmc_width);
avg_ghz = (double)delta_core / (double)delta_ref * TSC_GHz;
printf("%6.4f ",avg_ghz);
}
printf("\n");
printf("CORE_IPC ");
for (i=0; i<CORES_USED; i++) {
delta_inst = corrected_pmc_delta(fixed_counters[i][0][1],fixed_counters[i][0][0],fixed_pmc_width);
delta_core = corrected_pmc_delta(fixed_counters[i][1][1],fixed_counters[i][1][0],fixed_pmc_width);
ipc = (double)delta_inst / (double)delta_core;
printf("%6.4f ",ipc);
}
printf("\n");
printf("THREAD_EXECUTION_TIME ");
for (i=0; i<CORES_USED; i++) {
delta_tsc = corrected_pmc_delta(fixed_counters[i][3][1],fixed_counters[i][3][0],fixed_pmc_width);
t0 = (double)delta_tsc / (TSC_GHz*1.0e9);
printf("%f ",t0);
}
printf("\n");
#ifdef CHA_COUNTS
for (pkg=0; pkg<max_display_pkg; pkg++) {
for (counter=0; counter<4; counter++) {
printf("CHA_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,cha_pkg_sums[pkg][counter]);
}
}
#endif
#ifdef IMC_COUNTS
for (pkg=0; pkg<max_display_pkg; pkg++) {
for (counter=0; counter<4; counter++) { // no point in summing the cycle counts, so exclude counter 4
printf("IMC_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,imc_pkg_sums[pkg][counter]);
}
}
#endif
// for the Snoop Filter set
// expected = expected number of cache lines loaded from L2
// sf_evict_rate = #evictions / expected number of loads
expected = 8.0/64.0* (double)l2_contained_size * (double) inner_repetitions;
sf_evict_rate = (double) cha_pkg_sums[0][0] / expected;
printf("SnoopFilterEvictionRate %f\n",sf_evict_rate);
expected = (double)l2_contained_size * (double) (inner_repetitions+1); // adjusted for pre-load of data
printf("Dummy Sum value is %f, expected value %f\n",sum,expected);
expected = (double)l2_contained_size * (double) inner_repetitions;
printf("Expected number of cache lines loaded from L2 %f\n",expected/8.0);
printf("Number of performance counter wraprounds detected %d\n",nwraps);
#ifdef RETRIES
printf("Number of core performance counter reads retried %d\n",retries);
#endif
printf("Number of zero values found in the inner loop %d\n",zeros);
// printf("Expected Number of Loads for AVX2 code %ld\n",arraylen/4);
// printf("Expected Number of Cache Lines loaded %ld\n",arraylen/8);
for (i=0; i<CORES_USED; i++) {
if (iteration_counts[i] != inner_repetitions) {
printf("ERROR: thread %d iteration_counts %ld expected %ld\n",i,iteration_counts[i],inner_repetitions);
}
}
// per-core performance counter values
for (counter=0; counter<4; counter++) {
printf("CORE_counter %d ",counter);
for (i=0; i<CORES_USED; i++) {
delta = corrected_pmc_delta(core_counters[i][counter][1],core_counters[i][counter][0],core_pmc_width);
printf("%ld ",delta);
}
printf("\n");
}
// per-CHA performance counter values -- socket 0 only
for (counter=0; counter<4; counter++) {
printf("CHA_counter %d ",counter);
for (i=0; i<NUM_CHA_USED; i++) {
delta = corrected_pmc_delta(cha_counts[0][i][counter][1],cha_counts[0][i][counter][0],uncore_pmc_width);
printf("%ld ",delta);
}
printf("\n");
}
printf("Double-check physical address of base of array\n");
pagemapentry = get_pagemap_entry(&array[0]);
printf(" array[0] va 0x%.16lx pagemapentry 0x%.16lx\n",&array[0],pagemapentry);
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
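// Note (editorial): this works because a row-major matrix reinterpreted as
// column-major storage is its transpose, so C = A*B is obtained by computing
// C^T = B^T * A^T in place -- swap the operands and flip each storage-order flag.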
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
int tid = omp_get_thread_num();
int threads = omp_get_num_threads();
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users = threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(int shift=0; shift<threads; ++shift)
{
int i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#if !EIGEN_HAS_CXX11_ATOMIC
#pragma omp atomic
#endif
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
Index m_mc;
Index m_nc;
Index m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
// to determine the following heuristic.
// EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
// unless it has been specialized by the user or for a given architecture.
// Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
// I'm not sure it is still required.
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
7949.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) {
int t12;
int t10;
int t8;
int t6;
int t4;
int t2;
for (t2 = 1; t2 <= 500; t2 += 1) {
#pragma omp parallel for private(t4,t6,t8,t10,t12)
for (t4 = 1; t4 <= n - 2; t4 += 32)
for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
for (t8 = 1; t8 <= n - 2; t8 += 8)
for (t10 = t8; t10 <= (t8 + 7 < n - 2 ? t8 + 7 : n - 2); t10 += 1)
for (t12 = 1; t12 <= n - 2; t12 += 1)
B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12];
#pragma omp parallel for private(t4,t6,t8,t10,t12)
for (t4 = 1; t4 <= n - 2; t4 += 32)
for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
for (t8 = 1; t8 <= n - 2; t8 += 8)
for (t10 = t8; t10 <= (t8 + 7 < n - 2 ? t8 + 7 : n - 2); t10 += 1)
for (t12 = 1; t12 <= n - 2; t12 += 1)
A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12];
}
}
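// Editor's note: a minimal, hypothetical driver for the kernel above. The
// 120x120x120 bounds come from the signature; note that the generated code
// runs a fixed 500 time steps and ignores the tsteps argument. The names
// (main, N) and the initializer are illustrative, not part of the benchmark.
#if 0
#define N 120
int main(void) {
  static double A[N][N][N], B[N][N][N];
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++)
      for (int k = 0; k < N; k++)
        A[i][j][k] = B[i][j][k] = (double)(i + j + (N - k)) * 10.0 / N;
  kernel_heat_3d(500, N, A, B);
  return 0;
}
#endif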
|
pbeampp.c | /**************************************************************************
PBEAMPP.C of ZIB optimizer MCF, SPEC version
This software was developed at ZIB Berlin. Maintenance and revisions
solely on responsibility of Andreas Loebel
Dr. Andreas Loebel
Ortlerweg 29b, 12207 Berlin
Konrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)
Scientific Computing - Optimization
Takustr. 7, 14195 Berlin-Dahlem
Copyright (c) 1998-2000 ZIB.
Copyright (c) 2000-2002 ZIB & Loebel.
Copyright (c) 2003-2005 Andreas Loebel.
**************************************************************************/
/* LAST EDIT: Sun Nov 21 16:22:04 2004 by Andreas Loebel (boss.local.de) */
/* $Id: pbeampp.c,v 1.10 2005/02/17 19:42:32 bzfloebe Exp $ */
#define K 300
#define B 50
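/* Editor's note: K and B tune the "multiple partial pricing" scheme below.
   The arcs are scanned in nr_group interleaved groups (stride nr_group,
   roughly K arcs per group), and up to B promising candidates are collected
   in the basket before it is sorted. */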
#include "pbeampp.h"
#ifdef _PROTO_
int bea_is_dual_infeasible(arc_t *arc, cost_t red_cost)
#else
int bea_is_dual_infeasible(arc, red_cost)
arc_t *arc;
cost_t red_cost;
#endif
{
return ((red_cost < 0 && arc->ident == AT_LOWER) || (red_cost > 0 && arc->ident == AT_UPPER));
}
typedef struct basket
{
arc_t *a;
cost_t cost;
cost_t abs_cost;
} BASKET;
static long basket_size;
static BASKET basket[B + K + 1];
static BASKET *perm[B + K + 1];
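/* Editor's note: sort_basket() is a truncated quicksort over the pointer
   array perm[], ordering entries by decreasing abs_cost. The right-hand
   recursion is cut off once l > B, so only the first B slots are guaranteed
   to be fully sorted -- all the caller needs to pick the best candidate arc. */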
#ifdef _PROTO_
void sort_basket(long min, long max)
#else
void sort_basket(min, max) long min, max;
#endif
{
long l, r;
cost_t cut;
BASKET *xchange;
l = min;
r = max;
cut = perm[(long)((l + r) / 2)]->abs_cost;
do
{
while (perm[l]->abs_cost > cut)
l++;
while (cut > perm[r]->abs_cost)
r--;
if (l < r)
{
xchange = perm[l];
perm[l] = perm[r];
perm[r] = xchange;
}
if (l <= r)
{
l++;
r--;
}
} while (l <= r);
if (min < r)
sort_basket(min, r);
if (l < max && l <= B)
sort_basket(l, max);
}
static long nr_group;
static long group_pos;
static long initialize = 1;
#ifdef _PROTO_
arc_t *primal_bea_mpp(long m, arc_t *arcs, arc_t *stop_arcs,
cost_t *red_cost_of_bea)
#else
arc_t *primal_bea_mpp(m, arcs, stop_arcs, red_cost_of_bea) long m;
arc_t *arcs;
arc_t *stop_arcs;
cost_t *red_cost_of_bea;
#endif
{
long old_group_pos;
if (initialize)
{
for (long i = 1; i < K + B + 1; i++)
perm[i] = &(basket[i]);
nr_group = ((m - 1) / K) + 1;
group_pos = 0;
basket_size = 0;
initialize = 0;
}
else
{
/*****************************************************************/
/******************* BEGINNING FIRST FOR LOOP ********************/
/*****************************************************************/
int next_array[B + K + 1];
int next_increased_array[B + K + 1];
int next_increase_count[5];
arc_t *arc_array[B + K + 1];
cost_t red_cost_array[B + K + 1];
int min = B < basket_size ? B : basket_size;
int chunk_size = (min - 2 + 1) / 4;
#pragma omp parallel for
for (long j = 0; j < 4; j++)
{
long chunk_start = 2 + j * chunk_size;
long chunk_end = j == 3 ? min : 2 + j * chunk_size + chunk_size - 1;
next_increase_count[j + 1] = 0;
for (long i = chunk_start; i <= chunk_end; i++)
{
arc_t *arc = perm[i]->a;
cost_t red_cost = arc->cost - arc->tail->potential + arc->head->potential;
int ident = arc->ident;
arc_array[i] = arc;
red_cost_array[i] = red_cost;
int predicate = (red_cost < 0 && ident == AT_LOWER) || (red_cost > 0 && ident == AT_UPPER);
next_increased_array[i] = predicate ? 1 : 0;
next_increase_count[j + 1] += predicate;
}
}
next_increase_count[0] = 0;
for (long j = 1; j <= 4; j++)
{
next_increase_count[j] += next_increase_count[j - 1];
}
#pragma omp parallel for
for (long j = 0; j < 4; j++)
{
long next = 0;
long global_next;
long chunk_start = 2 + j * chunk_size;
long chunk_end = j == 3 ? min : 2 + j * chunk_size + chunk_size - 1;
for (long i = chunk_start; i <= chunk_end; i++)
{
if (next_increased_array[i])
{
next++;
global_next = next_increase_count[j] + next;
BASKET *current_prem = perm[global_next];
cost_t red_cost = red_cost_array[i];
current_prem->a = arc_array[i];
current_prem->cost = red_cost;
current_prem->abs_cost = ABS(red_cost);
}
}
}
basket_size = next_increase_count[4];
}
/*****************************************************************/
/********************* END FIRST FOR LOOP ************************/
/*****************************************************************/
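/* Editor's note on the parallel rewrite above: it is a two-pass stream
   compaction. Pass 1 splits basket entries 2..min into four chunks, storing
   a 0/1 predicate per entry and a per-chunk match count; the short serial
   loop turns the counts into prefix sums, giving each chunk its starting
   offset in perm[]; pass 2 re-scans and scatters the surviving entries at
   offset + local rank, so no two chunks ever write the same slot. */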
old_group_pos = group_pos;
/*****************************************************************/
/********************** BEGINNING GOTO LOOP **********************/
/*****************************************************************/
// int new_group_pos = group_pos;
// int new_group_pos_set = 0;
// int chunk_size = nr_group / 4;
// int basket_size_increase_count[5];
// int basket_size_increased_array_size = (((stop_arcs - arcs) / nr_group) + 1) * nr_group;
// // int bla = stop_arcs - arcs;
// // printf("bla: %d\n", basket_size_increased_array_size / 8);
// int *basket_size_increased_array = malloc(basket_size_increased_array_size * sizeof(int));
// for (long j = 0; j < 4; j++)
// {
// // printf("HELLO!!!!!\n");
// long chunk_start = j * chunk_size;
// long chunk_end = j == 3 ? nr_group : j * chunk_size + chunk_size;
// long real_chunk_size = chunk_end - chunk_start + 1;
// basket_size_increase_count[j + 1] = 0;
// for (long group_pos_index = 0; group_pos_index < real_chunk_size; group_pos_index++)
// {
// long current_group_pos = (group_pos + (j * chunk_size) + group_pos_index) % nr_group;
// arc_t *arc = arcs + current_group_pos;
// int index = 0;
// for (; arc < stop_arcs; arc += nr_group)
// {
// long basket_size_increased_index = nr_group * current_group_pos + index;
// // if (basket_size_increased_index < B)
// // {
// // printf("HELsdfsdfsdfLO2!!!!!\n");
// if (arc->ident > BASIC)
// {
// cost_t red_cost = arc->cost - arc->tail->potential + arc->head->potential;
// int predicate = bea_is_dual_infeasible(arc, red_cost);
// basket_size_increased_array[basket_size_increased_index] = predicate;
// basket_size_increase_count[j + 1] += predicate;
// }
// // }
// // else if (new_group_pos_set == 0)
// // {
// // new_group_pos_set = 1;
// // new_group_pos = current_group_pos;
// // }
// index++;
// }
// }
// }
// basket_size_increase_count[0] = 0;
// for (long j = 1; j <= 4; j++)
// {
// basket_size_increase_count[j] += basket_size_increase_count[j - 1];
// }
// for (long j = 0; j < 4; j++)
// {
// long global_basket_size = basket_size_increase_count[j];
// long chunk_start = j * chunk_size;
// long chunk_end = j == 3 ? nr_group : j * chunk_size + chunk_size;
// basket_size_increase_count[j + 1] = 0;
// for (long group_pos_index = 0; group_pos_index < chunk_size; group_pos_index++)
// {
// long current_group_pos = (group_pos + (j * chunk_size) + group_pos_index) % nr_group;
// arc_t *arc = arcs + current_group_pos;
// int index = 0;
// long local_basket_size = 0;
// for (; arc < stop_arcs; arc += nr_group)
// {
// long basket_size_increased_index = nr_group * current_group_pos + index;
// if (global_basket_size < B)
// {
// if (arc->ident > BASIC)
// {
// if (basket_size_increased_array[basket_size_increased_index])
// {
// local_basket_size++;
// global_basket_size = basket_size_increase_count[j] + local_basket_size;
// BASKET *current_prem = perm[global_basket_size];
// cost_t red_cost = arc->cost - arc->tail->potential + arc->head->potential;
// current_prem->a = arc;
// current_prem->cost = red_cost;
// current_prem->abs_cost = ABS(red_cost);
// }
// }
// }
// else if (new_group_pos_set == 0)
// {
// new_group_pos_set = 1;
// new_group_pos = current_group_pos;
// }
// index++;
// }
// }
// }
// group_pos = new_group_pos;
// basket_size = basket_size_increase_count[4];
/*****************************************************************/
/************************* END GOTO LOOP *************************/
/*****************************************************************/
arc_t *arc;
cost_t red_cost;
NEXT:
/* price next group */
arc = arcs + group_pos;
for (; arc < stop_arcs; arc += nr_group)
{
if (arc->ident > BASIC)
{
/* red_cost = bea_compute_red_cost( arc ); */
red_cost = arc->cost - arc->tail->potential + arc->head->potential;
if (bea_is_dual_infeasible(arc, red_cost))
{
basket_size++;
perm[basket_size]->a = arc;
perm[basket_size]->cost = red_cost;
perm[basket_size]->abs_cost = ABS(red_cost);
}
}
}
if (++group_pos == nr_group)
group_pos = 0;
if (basket_size < B && group_pos != old_group_pos)
goto NEXT;
if (basket_size == 0)
{
initialize = 1;
*red_cost_of_bea = 0;
return NULL;
}
sort_basket(1, basket_size);
*red_cost_of_bea = perm[1]->cost;
return (perm[1]->a);
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define LeftShiftOperator 0xf5U
#define RightShiftOperator 0xf6U
#define LessThanEqualOperator 0xf7U
#define GreaterThanEqualOperator 0xf8U
#define EqualOperator 0xf9U
#define NotEqualOperator 0xfaU
#define LogicalAndOperator 0xfbU
#define LogicalOrOperator 0xfcU
#define ExponentialNotation 0xfdU
struct _FxInfo
{
const Image
*images;
char
*expression;
FILE
*file;
SplayTreeInfo
*colors,
*symbols;
CacheView
**view;
RandomInfo
*random_info;
ExceptionInfo
*exception;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
ExceptionInfo *exception)
{
char
fx_op[2];
const Image
*next;
FxInfo
*fx_info;
register ssize_t
i;
fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
(void) memset(fx_info,0,sizeof(*fx_info));
fx_info->exception=AcquireExceptionInfo();
fx_info->images=images;
fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
fx_info->images),sizeof(*fx_info->view));
if (fx_info->view == (CacheView **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
i=0;
next=GetFirstImageInList(fx_info->images);
for ( ; next != (Image *) NULL; next=next->next)
{
fx_info->view[i]=AcquireVirtualCacheView(next,exception);
i++;
}
fx_info->random_info=AcquireRandomInfo();
fx_info->expression=ConstantString(expression);
fx_info->file=stderr;
(void) SubstituteString(&fx_info->expression," ",""); /* compact string */
/*
Force right-to-left associativity for unary negation.
*/
(void) SubstituteString(&fx_info->expression,"-","-1.0*");
(void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
(void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
(void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
/*
Convert compound to simple operators.
*/
fx_op[1]='\0';
*fx_op=(char) LeftShiftOperator;
(void) SubstituteString(&fx_info->expression,"<<",fx_op);
*fx_op=(char) RightShiftOperator;
(void) SubstituteString(&fx_info->expression,">>",fx_op);
*fx_op=(char) LessThanEqualOperator;
(void) SubstituteString(&fx_info->expression,"<=",fx_op);
*fx_op=(char) GreaterThanEqualOperator;
(void) SubstituteString(&fx_info->expression,">=",fx_op);
*fx_op=(char) EqualOperator;
(void) SubstituteString(&fx_info->expression,"==",fx_op);
*fx_op=(char) NotEqualOperator;
(void) SubstituteString(&fx_info->expression,"!=",fx_op);
*fx_op=(char) LogicalAndOperator;
(void) SubstituteString(&fx_info->expression,"&&",fx_op);
*fx_op=(char) LogicalOrOperator;
(void) SubstituteString(&fx_info->expression,"||",fx_op);
*fx_op=(char) ExponentialNotation;
(void) SubstituteString(&fx_info->expression,"**",fx_op);
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
CacheView
*image_view,
*noise_view;
Image
*noise_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
/*
Add noise in each row.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
if ((traits == UndefinedPixelTrait) ||
(noise_traits == UndefinedPixelTrait))
continue;
if ((noise_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(noise_image,channel,p[i],q);
continue;
}
SetPixelChannel(noise_image,channel,ClampToQuantum(
GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
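%  A minimal usage sketch (assumes image and exception are valid; 1.5 is a
%  typical shift factor):
%
%      shift_image=BlueShiftImage(image,1.5,exception);
%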
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"
CacheView
*image_view,
*shift_view;
Image
*shift_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate blue shift image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
shift_image=CloneImage(image,0,0,MagickTrue,exception);
if (shift_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
{
shift_image=DestroyImage(shift_image);
return((Image *) NULL);
}
/*
Blue-shift DirectClass image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,shift_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
Quantum
quantum;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
quantum=GetPixelRed(image,p);
if (GetPixelGreen(image,p) < quantum)
quantum=GetPixelGreen(image,p);
if (GetPixelBlue(image,p) < quantum)
quantum=GetPixelBlue(image,p);
pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
quantum=GetPixelRed(image,p);
if (GetPixelGreen(image,p) > quantum)
quantum=GetPixelGreen(image,p);
if (GetPixelBlue(image,p) > quantum)
quantum=GetPixelBlue(image,p);
pixel.red=0.5*(pixel.red+factor*quantum);
pixel.green=0.5*(pixel.green+factor*quantum);
pixel.blue=0.5*(pixel.blue+factor*quantum);
SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(shift_image);
}
sync=SyncCacheViewAuthenticPixels(shift_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
shift_view=DestroyCacheView(shift_view);
if (status == MagickFalse)
shift_image=DestroyImage(shift_image);
return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% its edges highlighted to simulate a charcoal drawing. It allocates the
% memory necessary for the new Image structure and returns a pointer to the
% new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
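%  A minimal usage sketch (assumes image and exception are valid; a radius
%  of 0.0 asks the underlying blur to select a suitable radius from sigma):
%
%      charcoal_image=CharcoalImage(image,0.0,1.0,exception);
%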
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*charcoal_image,
*edge_image;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
return((Image *) NULL);
charcoal_image=(Image *) NULL;
status=ClampImage(edge_image,exception);
if (status != MagickFalse)
charcoal_image=BlurImage(edge_image,radius,sigma,exception);
edge_image=DestroyImage(edge_image);
if (charcoal_image == (Image *) NULL)
return((Image *) NULL);
status=NormalizeImage(charcoal_image,exception);
if (status != MagickFalse)
status=NegateImage(charcoal_image,MagickFalse,exception);
if (status != MagickFalse)
status=GrayscaleImage(charcoal_image,image->intensity,exception);
if (status == MagickFalse)
charcoal_image=DestroyImage(charcoal_image);
return(charcoal_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image. The
% level of blending is specified with the blend percentage. Control the
% application of different color components by specifying a different
% percentage for each component (e.g. 90/100/10 is 90% red, 100% green, and
% 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
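%  A minimal usage sketch (assumes image and exception are valid; this
%  blends 50% of red into every pixel):
%
%      PixelInfo
%        colorize;
%
%      (void) QueryColorCompliance("red",AllCompliance,&colorize,exception);
%      colorize_image=ColorizeImage(image,"50",&colorize,exception);
%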
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
#define Colorize(pixel,blend_percentage,colorize) \
(((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)
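/*
  For example, Colorize(0.0,50.0,QuantumRange) yields QuantumRange/2: a 50%
  blend moves the pixel value halfway toward the colorize value.
*/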
CacheView
*image_view;
GeometryInfo
geometry_info;
Image
*colorize_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickStatusType
flags;
PixelInfo
blend_percentage;
ssize_t
y;
/*
Allocate colorized image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
colorize_image=CloneImage(image,0,0,MagickTrue,exception);
if (colorize_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
{
colorize_image=DestroyImage(colorize_image);
return((Image *) NULL);
}
if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
(IsPixelInfoGray(colorize) != MagickFalse))
(void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
(colorize->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
if (blend == (const char *) NULL)
return(colorize_image);
GetPixelInfo(colorize_image,&blend_percentage);
flags=ParseGeometry(blend,&geometry_info);
blend_percentage.red=geometry_info.rho;
blend_percentage.green=geometry_info.rho;
blend_percentage.blue=geometry_info.rho;
blend_percentage.black=geometry_info.rho;
blend_percentage.alpha=(MagickRealType) TransparentAlpha;
if ((flags & SigmaValue) != 0)
blend_percentage.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
blend_percentage.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
blend_percentage.alpha=geometry_info.psi;
if (blend_percentage.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
blend_percentage.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
blend_percentage.alpha=geometry_info.chi;
}
/*
Colorize DirectClass image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
for (y=0; y < (ssize_t) colorize_image->rows; y++)
{
MagickBooleanType
sync;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) colorize_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
{
PixelTrait traits = GetPixelChannelTraits(colorize_image,
(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
}
q+=GetPixelChannels(colorize_image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorizeImageTag,progress,
colorize_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
colorize_image=DestroyImage(colorize_image);
return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies a color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
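%  A minimal usage sketch (assumes image and exception are valid; the matrix
%  string mirrors the -color-matrix CLI option, and the values here are
%  illustrative only):
%
%      KernelInfo
%        *kernel;
%
%      kernel=AcquireKernelInfo("3x3: 0.9 0 0  0 0.9 0  0 0 1.2",exception);
%      if (kernel != (KernelInfo *) NULL)
%        {
%          color_image=ColorMatrixImage(image,kernel,exception);
%          kernel=DestroyKernelInfo(kernel);
%        }
%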
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
that should be provided in "matrix.c"
(ASIDE: distorts should do this too, but currently do not)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"
CacheView
*color_view,
*image_view;
double
ColorMatrix[6][6] =
{
{ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
};
Image
*color_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
u,
v,
y;
/*
Map the given color_matrix into a 6x6 RGBKA matrix plus a constant.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
i=0;
for (v=0; v < (ssize_t) color_matrix->height; v++)
for (u=0; u < (ssize_t) color_matrix->width; u++)
{
if ((v < 6) && (u < 6))
ColorMatrix[v][u]=color_matrix->values[i];
i++;
}
/*
Initialize color image.
*/
color_image=CloneImage(image,0,0,MagickTrue,exception);
if (color_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
{
color_image=DestroyImage(color_image);
return((Image *) NULL);
}
if (image->debug != MagickFalse)
{
char
format[MagickPathExtent],
*message;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" ColorMatrix image with color matrix:");
message=AcquireString("");
for (v=0; v < 6; v++)
{
*message='\0';
(void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < 6; u++)
{
(void) FormatLocaleString(format,MagickPathExtent,"%+f ",
ColorMatrix[v][u]);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
/*
Apply the ColorMatrix to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,color_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
v;
size_t
height;
GetPixelInfoPixel(image,p,&pixel);
height=color_matrix->height > 6 ? 6UL : color_matrix->height;
for (v=0; v < (ssize_t) height; v++)
{
double
sum;
sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
if (image->colorspace == CMYKColorspace)
sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
if (image->alpha_trait != UndefinedPixelTrait)
sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
sum+=QuantumRange*ColorMatrix[v][5];
switch (v)
{
case 0: pixel.red=sum; break;
case 1: pixel.green=sum; break;
case 2: pixel.blue=sum; break;
case 3: pixel.black=sum; break;
case 4: pixel.alpha=sum; break;
default: break;
}
}
SetPixelViaPixelInfo(color_image,&pixel,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(color_image);
}
if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
color_view=DestroyCacheView(color_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
color_image=DestroyImage(color_image);
return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
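%  A minimal usage sketch (AcquireFxInfo() is the matching constructor
%  defined earlier in this module; "u*1.5" is an illustrative expression):
%
%      fx_info=AcquireFxInfo(image,"u*1.5",exception);
%      ...
%      fx_info=DestroyFxInfo(fx_info);
%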
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
register ssize_t
i;
fx_info->exception=DestroyExceptionInfo(fx_info->exception);
fx_info->expression=DestroyString(fx_info->expression);
fx_info->symbols=DestroySplayTree(fx_info->symbols);
fx_info->colors=DestroySplayTree(fx_info->colors);
for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--)
fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression for the given channel
% and pixel position and returns the result in alpha.
%
% The format of the FxEvaluateChannelExpression and FxEvaluateExpression
% methods is:
%
%      MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
%        const PixelChannel channel,const ssize_t x,const ssize_t y,
%        double *alpha,ExceptionInfo *exception)
%      MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
%        double *alpha,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
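%  A minimal usage sketch (assumes fx_info, x, y, and exception are valid;
%  the result is returned in alpha):
%
%      double
%        alpha;
%
%      if (FxEvaluateChannelExpression(fx_info,RedPixelChannel,x,y,&alpha,
%          exception) != MagickFalse)
%        (void) FormatLocaleFile(stdout,"%g\n",alpha);
%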
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
ChannelType
channel_mask;
char
key[MagickPathExtent],
statistic[MagickPathExtent];
const char
*value;
register const char
*p;
channel_mask=UndefinedChannel;
for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
if (*p == '.')
{
ssize_t
option;
option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
if (option >= 0)
{
channel=(PixelChannel) option;
channel_mask=SetPixelChannelMask(image,(ChannelType)
(1UL << channel));
}
}
(void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
(double) channel,symbol);
value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
if (value != (const char *) NULL)
{
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
return(QuantumScale*StringToDouble(value,(char **) NULL));
}
(void) DeleteNodeFromSplayTree(fx_info->symbols,key);
if (LocaleNCompare(symbol,"depth",5) == 0)
{
size_t
depth;
depth=GetImageDepth(image,exception);
(void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double)
depth);
}
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
(void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",kurtosis);
}
if (LocaleNCompare(symbol,"maxima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
(void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",maxima);
}
if (LocaleNCompare(symbol,"mean",4) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
(void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",mean);
}
if (LocaleNCompare(symbol,"minima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
(void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",minima);
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
(void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",skewness);
}
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
(void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",
standard_deviation);
}
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
(void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
ConstantString(statistic));
return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
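/*
  FxGCD() returns the greatest common divisor of two integers by Euclid's
  algorithm; for example, FxGCD(54,24) is 6.
*/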
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
if (beta != 0)
return(FxGCD(beta,alpha % beta));
return(alpha);
}
static inline const char *FxSubexpression(const char *expression,
ExceptionInfo *exception)
{
const char
*subexpression;
register ssize_t
level;
level=0;
subexpression=expression;
while ((*subexpression != '\0') &&
((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL)))
{
if (strchr("(",(int) *subexpression) != (char *) NULL)
level++;
else
if (strchr(")",(int) *subexpression) != (char *) NULL)
level--;
subexpression++;
}
if (*subexpression == '\0')
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnbalancedParenthesis","`%s'",expression);
return(subexpression);
}
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
ExceptionInfo *exception)
{
char
*q,
symbol[MagickPathExtent];
const char
*p,
*value;
Image
*image;
MagickBooleanType
status;
PixelInfo
pixel;
double
alpha,
beta;
PointInfo
point;
register ssize_t
i;
size_t
level;
p=expression;
i=GetImageIndexInList(fx_info->images);
level=0;
point.x=(double) x;
point.y=(double) y;
if (isalpha((int) ((unsigned char) *(p+1))) == 0)
{
char
*subexpression;
subexpression=AcquireString(expression);
if (strchr("suv",(int) *p) != (char *) NULL)
{
switch (*p)
{
case 's':
default:
{
i=GetImageIndexInList(fx_info->images);
break;
}
case 'u': i=0; break;
case 'v': i=1; break;
}
p++;
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
i=(ssize_t) alpha;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
{
p++;
if (*p == '{')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '{')
level++;
else
if (*p == '}')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x=alpha;
point.y=beta;
if (*p != '\0')
p++;
}
else
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x+=alpha;
point.y+=beta;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
subexpression=DestroyString(subexpression);
}
image=GetImageFromList(fx_info->images,i);
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"NoSuchImage","`%s'",expression);
return(0.0);
}
i=GetImageIndexInList(image);
GetPixelInfo(image,&pixel);
status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
point.x,point.y,&pixel,exception);
(void) status;
if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) &&
(LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) &&
(LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) &&
(LocaleCompare(p,"lightness") != 0))
{
char
name[MagickPathExtent];
(void) CopyMagickString(name,p,MagickPathExtent);
for (q=name+(strlen(name)-1); q > name; q--)
{
if (*q == ')')
break;
if (*q == '.')
{
*q='\0';
break;
}
}
if ((strlen(name) > 2) &&
(GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
{
PixelInfo
*color;
color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
if (color != (PixelInfo *) NULL)
{
pixel=(*color);
p+=strlen(name);
}
else
{
MagickBooleanType
status;
status=QueryColorCompliance(name,AllCompliance,&pixel,
fx_info->exception);
if (status != MagickFalse)
{
(void) AddValueToSplayTree(fx_info->colors,ConstantString(
name),ClonePixelInfo(&pixel));
p+=strlen(name);
}
}
}
}
(void) CopyMagickString(symbol,p,MagickPathExtent);
StripString(symbol);
if (*symbol == '\0')
{
switch (channel)
{
case RedPixelChannel: return(QuantumScale*pixel.red);
case GreenPixelChannel: return(QuantumScale*pixel.green);
case BluePixelChannel: return(QuantumScale*pixel.blue);
case BlackPixelChannel:
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ImageError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
case AlphaPixelChannel:
{
if (pixel.alpha_trait == UndefinedPixelTrait)
return(1.0);
alpha=(double) (QuantumScale*pixel.alpha);
return(alpha);
}
case CompositePixelChannel:
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
case IndexPixelChannel:
return(0.0);
default:
break;
}
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",p);
return(0.0);
}
switch (*symbol)
{
case 'A':
case 'a':
{
if (LocaleCompare(symbol,"a") == 0)
return((QuantumScale*pixel.alpha));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(symbol,"b") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(symbol,"channel",7) == 0)
{
GeometryInfo
channel_info;
MagickStatusType
flags;
flags=ParseGeometry(symbol+7,&channel_info);
if (image->colorspace == CMYKColorspace)
switch (channel)
{
case CyanPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case MagentaPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case YellowPixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
case AlphaPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
default:
return(0.0);
}
switch (channel)
{
case RedPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case GreenPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case BluePixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
case AlphaPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
default:
return(0.0);
}
}
if (LocaleCompare(symbol,"c") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(symbol,"depth",5) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(symbol,"extent") == 0)
{
if (image->extent != 0)
return((double) image->extent);
return((double) GetBlobSize(image));
}
break;
}
case 'G':
case 'g':
{
if (LocaleCompare(symbol,"g") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'K':
case 'k':
{
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"k") == 0)
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(symbol,"h") == 0)
return((double) image->rows);
if (LocaleCompare(symbol,"hue") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(hue);
}
break;
}
case 'I':
case 'i':
{
if ((LocaleCompare(symbol,"image.depth") == 0) ||
(LocaleCompare(symbol,"image.minima") == 0) ||
(LocaleCompare(symbol,"image.maxima") == 0) ||
(LocaleCompare(symbol,"image.mean") == 0) ||
(LocaleCompare(symbol,"image.kurtosis") == 0) ||
(LocaleCompare(symbol,"image.skewness") == 0) ||
(LocaleCompare(symbol,"image.standard_deviation") == 0))
return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
if (LocaleCompare(symbol,"image.resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"image.resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"intensity") == 0)
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
if (LocaleCompare(symbol,"i") == 0)
return((double) x);
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(symbol,"j") == 0)
return((double) y);
break;
}
case 'L':
case 'l':
{
if (LocaleCompare(symbol,"lightness") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(lightness);
}
if (LocaleCompare(symbol,"luma") == 0)
{
double
luma;
luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luma);
}
if (LocaleCompare(symbol,"luminance") == 0)
{
double
luminance;
luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luminance);
}
break;
}
case 'M':
case 'm':
{
if (LocaleNCompare(symbol,"maxima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"mean",4) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"minima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"m") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'N':
case 'n':
{
if (LocaleCompare(symbol,"n") == 0)
return((double) GetImageListLength(fx_info->images));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(symbol,"o") == 0)
return(QuantumScale*pixel.alpha);
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(symbol,"page.height") == 0)
return((double) image->page.height);
if (LocaleCompare(symbol,"page.width") == 0)
return((double) image->page.width);
if (LocaleCompare(symbol,"page.x") == 0)
return((double) image->page.x);
if (LocaleCompare(symbol,"page.y") == 0)
return((double) image->page.y);
if (LocaleCompare(symbol,"printsize.x") == 0)
return(PerceptibleReciprocal(image->resolution.x)*image->columns);
if (LocaleCompare(symbol,"printsize.y") == 0)
return(PerceptibleReciprocal(image->resolution.y)*image->rows);
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(symbol,"quality") == 0)
return((double) image->quality);
break;
}
case 'R':
case 'r':
{
if (LocaleCompare(symbol,"resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"r") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'S':
case 's':
{
if (LocaleCompare(symbol,"saturation") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(saturation);
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'T':
case 't':
{
if (LocaleCompare(symbol,"t") == 0)
return((double) GetImageIndexInList(fx_info->images));
break;
}
case 'W':
case 'w':
{
if (LocaleCompare(symbol,"w") == 0)
return((double) image->columns);
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(symbol,"y") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(symbol,"z") == 0)
return((double) GetImageDepth(image,fx_info->exception));
break;
}
default:
break;
}
value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
if (value != (const char *) NULL)
return(StringToDouble(value,(char **) NULL));
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",symbol);
return(0.0);
}
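/*
  FxOperatorPrecedence() scans an expression at parenthesis level zero and
  returns a pointer to its weakest-binding operator, i.e. the point at which
  FxEvaluateSubexpression() splits the expression, or NULL if the expression
  contains no operator.
*/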
static const char *FxOperatorPrecedence(const char *expression,
ExceptionInfo *exception)
{
typedef enum
{
UndefinedPrecedence,
NullPrecedence,
BitwiseComplementPrecedence,
ExponentPrecedence,
ExponentialNotationPrecedence,
MultiplyPrecedence,
AdditionPrecedence,
ShiftPrecedence,
RelationalPrecedence,
EquivalencyPrecedence,
BitwiseAndPrecedence,
BitwiseOrPrecedence,
LogicalAndPrecedence,
LogicalOrPrecedence,
TernaryPrecedence,
AssignmentPrecedence,
CommaPrecedence,
SeparatorPrecedence
} FxPrecedence;
FxPrecedence
precedence,
target;
register const char
*subexpression;
register int
c;
size_t
level;
c=(-1);
level=0;
subexpression=(const char *) NULL;
target=NullPrecedence;
while ((c != '\0') && (*expression != '\0'))
{
precedence=UndefinedPrecedence;
if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
{
expression++;
continue;
}
switch (*expression)
{
case 'A':
case 'a':
{
#if defined(MAGICKCORE_HAVE_ACOSH)
if (LocaleNCompare(expression,"acosh",5) == 0)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (LocaleNCompare(expression,"asinh",5) == 0)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
if (LocaleNCompare(expression,"atanh",5) == 0)
{
expression+=5;
break;
}
#endif
if (LocaleNCompare(expression,"atan2",5) == 0)
{
expression+=5;
break;
}
break;
}
case 'E':
case 'e':
{
if ((isdigit(c) != 0) &&
((LocaleNCompare(expression,"E+",2) == 0) ||
(LocaleNCompare(expression,"E-",2) == 0)))
{
expression+=2; /* scientific notation */
break;
}
}
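/* no break: falls through when not scientific notation */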
case 'J':
case 'j':
{
if ((LocaleNCompare(expression,"j0",2) == 0) ||
(LocaleNCompare(expression,"j1",2) == 0))
{
expression+=2;
break;
}
break;
}
case '#':
{
while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
expression++;
break;
}
default:
break;
}
if ((c == (int) '{') || (c == (int) '['))
level++;
else
if ((c == (int) '}') || (c == (int) ']'))
level--;
if (level == 0)
switch ((unsigned char) *expression)
{
case '~':
case '!':
{
precedence=BitwiseComplementPrecedence;
break;
}
case '^':
case '@':
{
precedence=ExponentPrecedence;
break;
}
default:
{
if (((c != 0) && ((isdigit(c) != 0) ||
(strchr(")",c) != (char *) NULL))) &&
(((islower((int) ((unsigned char) *expression)) != 0) ||
(strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
((isdigit(c) == 0) &&
(isdigit((int) ((unsigned char) *expression)) != 0))) &&
(strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
precedence=MultiplyPrecedence;
break;
}
case '*':
case '/':
case '%':
{
precedence=MultiplyPrecedence;
break;
}
case '+':
case '-':
{
if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
(isalpha(c) != 0))
precedence=AdditionPrecedence;
break;
}
case LeftShiftOperator:
case RightShiftOperator:
{
precedence=ShiftPrecedence;
break;
}
case '<':
case LessThanEqualOperator:
case GreaterThanEqualOperator:
case '>':
{
precedence=RelationalPrecedence;
break;
}
case EqualOperator:
case NotEqualOperator:
{
precedence=EquivalencyPrecedence;
break;
}
case '&':
{
precedence=BitwiseAndPrecedence;
break;
}
case '|':
{
precedence=BitwiseOrPrecedence;
break;
}
case LogicalAndOperator:
{
precedence=LogicalAndPrecedence;
break;
}
case LogicalOrOperator:
{
precedence=LogicalOrPrecedence;
break;
}
case ExponentialNotation:
{
precedence=ExponentialNotationPrecedence;
break;
}
case ':':
case '?':
{
precedence=TernaryPrecedence;
break;
}
case '=':
{
precedence=AssignmentPrecedence;
break;
}
case ',':
{
precedence=CommaPrecedence;
break;
}
case ';':
{
precedence=SeparatorPrecedence;
break;
}
}
if ((precedence == BitwiseComplementPrecedence) ||
(precedence == TernaryPrecedence) ||
(precedence == AssignmentPrecedence))
{
if (precedence > target)
{
/*
Right-to-left associativity.
*/
target=precedence;
subexpression=expression;
}
}
else
if (precedence >= target)
{
/*
Left-to-right associativity.
*/
target=precedence;
subexpression=expression;
}
if (strchr("(",(int) *expression) != (char *) NULL)
expression=FxSubexpression(expression,exception);
c=(int) (*expression++);
}
return(subexpression);
}
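/*
  FxEvaluateSubexpression() recursively evaluates an expression: it first
  splits at the operator reported by FxOperatorPrecedence(), then handles
  parenthesized groups, built-in functions, and constants, and finally falls
  back to symbol lookup and numeric conversion.
*/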
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
char
*q,
*subexpression;
double
alpha,
gamma;
register const char
*p;
*beta=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
FxReturn(0.0);
}
FxReturn(alpha/(*beta));
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=fabs(floor((*beta)+0.5));
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
FxReturn(0.0);
}
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent);
q=subexpression;
p=StringToken(":",&q);
if (q == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
char
numeric[MagickPathExtent];
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
(void) FormatLocaleString(numeric,MagickPathExtent,"%.20g",*beta);
(void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
(void) AddValueToSplayTree(fx_info->symbols,ConstantString(
subexpression),ConstantString(numeric));
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
(void) CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (strlen(subexpression) != 0)
subexpression[strlen(subexpression)-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (LocaleNCompare(expression,"abs",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (LocaleNCompare(expression,"acosh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (LocaleNCompare(expression,"acos",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"airy",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (LocaleNCompare(expression,"asinh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (LocaleNCompare(expression,"asin",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (LocaleNCompare(expression,"alt",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"atan2",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (LocaleNCompare(expression,"atanh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (LocaleNCompare(expression,"atan",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(expression,"ceil",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (LocaleNCompare(expression,"clamp",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (LocaleNCompare(expression,"cosh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (LocaleNCompare(expression,"cos",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(expression,"debug",5) == 0)
{
const char
*type;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (fx_info->images->colorspace == CMYKColorspace)
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="opacity"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
else
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="opacity"; break;
default: type="unknown"; break;
}
*subexpression='\0';
if (strlen(expression) > 6)
(void) CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (strlen(subexpression) > 1)
subexpression[strlen(subexpression)-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(0.0);
}
if (LocaleNCompare(expression,"drc",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (LocaleNCompare(expression,"erf",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (LocaleNCompare(expression,"exp",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (LocaleNCompare(expression,"floor",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
break;
}
case 'G':
case 'g':
{
if (LocaleNCompare(expression,"gauss",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
FxReturn(gamma);
}
if (LocaleNCompare(expression,"gcd",3) == 0)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleNCompare(expression,"hypot",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleNCompare(expression,"int",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (LocaleNCompare(expression,"isnan",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (LocaleNCompare(expression,"j0",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"j1",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"jinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha));
FxReturn(gamma);
}
#endif
break;
}
case 'L':
case 'l':
{
if (LocaleNCompare(expression,"ln",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (LocaleNCompare(expression,"logtwo",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (LocaleNCompare(expression,"log",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (LocaleNCompare(expression,"max",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (LocaleNCompare(expression,"min",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"mod",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gamma=alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta);
FxReturn(gamma);
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (LocaleNCompare(expression,"not",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (LocaleNCompare(expression,"pow",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (LocaleNCompare(expression,"rand",4) == 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (LocaleNCompare(expression,"round",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha+0.5));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleNCompare(expression,"sign",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"sinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=sin((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma);
}
if (LocaleNCompare(expression,"sinh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (LocaleNCompare(expression,"sin",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (LocaleNCompare(expression,"sqrt",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (LocaleNCompare(expression,"squish",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (LocaleNCompare(expression,"tanh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (LocaleNCompare(expression,"tan",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (LocaleNCompare(expression,"trunc",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (LocaleNCompare(expression,"while",5) == 0)
{
do
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
} while (fabs(alpha) >= MagickEpsilon);
FxReturn(*beta);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
FxReturn(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
exception);
return(status);
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
FILE
*file;
MagickBooleanType
status;
file=fx_info->file;
fx_info->file=(FILE *) NULL;
status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
exception);
fx_info->file=file;
return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
double *alpha,ExceptionInfo *exception)
{
double
beta;
beta=0.0;
*alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
&beta,exception);
return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
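% A minimal usage sketch (the expression string is illustrative; image and
% exception are assumed to be supplied by the caller, and a NULL return
% indicates the expression could not be evaluated):
%
% Image *fx_image;
%
% fx_image=FxImage(image,"u*0.5",exception);
%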
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
register ssize_t
i;
assert(fx_info != (FxInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (fx_info[i] != (FxInfo *) NULL)
fx_info[i]=DestroyFxInfo(fx_info[i]);
fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
ExceptionInfo *exception)
{
char
*fx_expression;
FxInfo
**fx_info;
double
alpha;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
if (fx_info == (FxInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return((FxInfo **) NULL);
}
(void) memset(fx_info,0,number_threads*sizeof(*fx_info));
if (*expression != '@')
fx_expression=ConstantString(expression);
else
fx_expression=FileToString(expression+1,~0UL,exception);
for (i=0; i < (ssize_t) number_threads; i++)
{
MagickBooleanType
status;
fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
if (fx_info[i] == (FxInfo *) NULL)
break;
status=FxPreprocessExpression(fx_info[i],&alpha,exception);
if (status == MagickFalse)
break;
}
fx_expression=DestroyString(fx_expression);
if (i < (ssize_t) number_threads)
fx_info=DestroyFxThreadSet(fx_info);
return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"
CacheView
*fx_view,
*image_view;
FxInfo
**magick_restrict fx_info;
Image
*fx_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (expression == (const char *) NULL)
return(CloneImage(image,0,0,MagickTrue,exception));
fx_info=AcquireFxThreadSet(image,expression,exception);
if (fx_info == (FxInfo **) NULL)
return((Image *) NULL);
fx_image=CloneImage(image,0,0,MagickTrue,exception);
if (fx_image == (Image *) NULL)
{
fx_info=DestroyFxThreadSet(fx_info);
return((Image *) NULL);
}
if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
{
fx_info=DestroyFxThreadSet(fx_info);
fx_image=DestroyImage(fx_image);
return((Image *) NULL);
}
/*
Fx image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
for (y=0; y < (ssize_t) fx_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) fx_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
if ((traits == UndefinedPixelTrait) ||
(fx_traits == UndefinedPixelTrait))
continue;
if ((fx_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(fx_image,channel,p[i],q);
continue;
}
alpha=0.0;
(void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
exception);
q[i]=ClampToQuantum(QuantumRange*alpha);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(fx_image);
}
if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
fx_view=DestroyCacheView(fx_view);
image_view=DestroyCacheView(image_view);
fx_info=DestroyFxThreadSet(fx_info);
if (status == MagickFalse)
fx_image=DestroyImage(fx_image);
return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
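% A minimal usage sketch (the amount and interpolation method are
% illustrative; per the factor computed below, a negative amount pushes
% pixels outward rather than inward):
%
% Image *implode_image;
%
% implode_image=ImplodeImage(image,0.5,BilinearInterpolatePixel,exception);
%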
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"
CacheView
*canvas_view,
*implode_view,
*interpolate_view;
double
radius;
Image
*canvas_image,
*implode_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize implode image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
(canvas_image->background_color.alpha != OpaqueAlpha))
(void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
if (implode_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
implode_image=DestroyImage(implode_image);
return((Image *) NULL);
}
/*
Compute scaling factor.
*/
scale.x=1.0;
scale.y=1.0;
center.x=0.5*canvas_image->columns;
center.y=0.5*canvas_image->rows;
radius=center.x;
if (canvas_image->columns > canvas_image->rows)
scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
else
if (canvas_image->columns < canvas_image->rows)
{
scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
radius=center.y;
}
/*
Implode image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas_image,exception);
interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
for (y=0; y < (ssize_t) canvas_image->rows; y++)
{
double
distance;
PointInfo
delta;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas_image->columns; x++)
{
register ssize_t
i;
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(implode_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(implode_image,channel,p[i],q);
}
else
{
double
factor;
/*
Implode the pixel.
*/
factor=1.0;
if (distance > 0.0)
factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
status=InterpolatePixelChannels(canvas_image,interpolate_view,
implode_image,method,(double) (factor*delta.x/scale.x+center.x),
(double) (factor*delta.y/scale.y+center.y),q,exception);
if (status == MagickFalse)
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(implode_image);
}
if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
implode_view=DestroyCacheView(implode_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas_image=DestroyImage(canvas_image);
if (status == MagickFalse)
implode_image=DestroyImage(implode_image);
return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImages method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between images to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
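% A minimal usage sketch (images is an assumed caller-supplied list with at
% least two frames; five in-between frames are generated per transition):
%
% Image *morph;
%
% morph=MorphImages(images,5,exception);
%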
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"
double
alpha,
beta;
Image
*morph_image,
*morph_images;
MagickBooleanType
status;
MagickOffsetType
scene;
register const Image
*next;
register ssize_t
n;
ssize_t
y;
/*
Clone first frame in sequence.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
morph_images=CloneImage(image,0,0,MagickTrue,exception);
if (morph_images == (Image *) NULL)
return((Image *) NULL);
if (GetNextImageInList(image) == (Image *) NULL)
{
/*
Morph single image.
*/
for (n=1; n < (ssize_t) number_frames; n++)
{
morph_image=CloneImage(image,0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
number_frames);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(GetFirstImageInList(morph_images));
}
/*
Morph image sequence.
*/
status=MagickTrue;
scene=0;
next=image;
for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
{
for (n=0; n < (ssize_t) number_frames; n++)
{
CacheView
*image_view,
*morph_view;
beta=(double) (n+1.0)/(double) (number_frames+1.0);
alpha=1.0-beta;
morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
GetNextImageInList(next)->rows+0.5),next->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
status=SetImageStorageClass(morph_image,DirectClass,exception);
if (status == MagickFalse)
{
morph_image=DestroyImage(morph_image);
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
morph_images->rows,GetNextImageInList(next)->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
image_view=AcquireVirtualCacheView(morph_image,exception);
morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
for (y=0; y < (ssize_t) morph_images->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) morph_images->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(morph_image,i);
PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
if ((traits == UndefinedPixelTrait) ||
(morph_traits == UndefinedPixelTrait))
continue;
if ((morph_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(morph_image,channel,p[i],q);
continue;
}
SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
}
p+=GetPixelChannels(morph_image);
q+=GetPixelChannels(morph_images);
}
sync=SyncCacheViewAuthenticPixels(morph_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
morph_view=DestroyCacheView(morph_view);
image_view=DestroyCacheView(image_view);
morph_image=DestroyImage(morph_image);
}
if (n < (ssize_t) number_frames)
break;
/*
Clone last frame in sequence.
*/
morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,scene,
GetImageListLength(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
scene++;
}
if (GetNextImageInList(next) != (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
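% A minimal usage sketch (the attenuation and depth values are illustrative;
% the segment spans the whole of an assumed pre-initialized image):
%
% SegmentInfo segment;
%
% segment.x1=0.0;
% segment.y1=0.0;
% segment.x2=(double) (image->columns-1);
% segment.y2=(double) (image->rows-1);
% (void) PlasmaImage(image,&segment,1,8,exception);
%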
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
const double pixel,const double noise)
{
Quantum
plasma;
plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
noise/2.0);
if (plasma <= 0)
return((Quantum) 0);
if (plasma >= QuantumRange)
return(QuantumRange);
return(plasma);
}
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
CacheView *u_view,CacheView *v_view,RandomInfo *random_info,
const SegmentInfo *segment,size_t attenuate,size_t depth,
ExceptionInfo *exception)
{
double
plasma;
register const Quantum
*magick_restrict u,
*magick_restrict v;
register Quantum
*magick_restrict q;
register ssize_t
i;
ssize_t
x,
x_mid,
y,
y_mid;
if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
(fabs(segment->y2-segment->y1) <= MagickEpsilon))
return(MagickTrue);
if (depth != 0)
{
MagickBooleanType
status;
SegmentInfo
local_info;
/*
Divide the area into quadrants and recurse.
*/
depth--;
attenuate++;
x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
local_info=(*segment);
local_info.x2=(double) x_mid;
local_info.y2=(double) y_mid;
(void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
local_info=(*segment);
local_info.y1=(double) y_mid;
local_info.x2=(double) x_mid;
(void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
local_info=(*segment);
local_info.x1=(double) x_mid;
local_info.y2=(double) y_mid;
(void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
local_info=(*segment);
local_info.x1=(double) x_mid;
local_info.y1=(double) y_mid;
status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
return(status);
}
x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
(fabs(segment->x2-x_mid) < MagickEpsilon) &&
(fabs(segment->y1-y_mid) < MagickEpsilon) &&
(fabs(segment->y2-y_mid) < MagickEpsilon))
return(MagickFalse);
/*
Average pixels and apply plasma.
*/
plasma=(double) QuantumRange/(2.0*attenuate);
if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
(fabs(segment->x2-x_mid) > MagickEpsilon))
{
/*
Left pixel.
*/
x=(ssize_t) ceil(segment->x1-0.5);
u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
exception);
v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
exception);
q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
}
(void) SyncCacheViewAuthenticPixels(image_view,exception);
if (fabs(segment->x1-segment->x2) > MagickEpsilon)
{
/*
Right pixel.
*/
x=(ssize_t) ceil(segment->x2-0.5);
u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
1,1,exception);
v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
}
(void) SyncCacheViewAuthenticPixels(image_view,exception);
}
}
if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
(fabs(segment->y2-y_mid) > MagickEpsilon))
{
if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
(fabs(segment->y2-y_mid) > MagickEpsilon))
{
/*
Bottom pixel.
*/
y=(ssize_t) ceil(segment->y2-0.5);
u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
1,1,exception);
v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
}
(void) SyncCacheViewAuthenticPixels(image_view,exception);
}
if (fabs(segment->y1-segment->y2) > MagickEpsilon)
{
/*
Top pixel.
*/
y=(ssize_t) ceil(segment->y1-0.5);
u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
1,1,exception);
v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
}
(void) SyncCacheViewAuthenticPixels(image_view,exception);
}
}
if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
(fabs(segment->y1-segment->y2) > MagickEpsilon))
{
/*
Middle pixel.
*/
x=(ssize_t) ceil(segment->x1-0.5);
y=(ssize_t) ceil(segment->y1-0.5);
u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
x=(ssize_t) ceil(segment->x2-0.5);
y=(ssize_t) ceil(segment->y2-0.5);
v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
}
(void) SyncCacheViewAuthenticPixels(image_view,exception);
}
if ((fabs(segment->x2-segment->x1) < 3.0) &&
(fabs(segment->y2-segment->y1) < 3.0))
return(MagickTrue);
return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
const SegmentInfo *segment,size_t attenuate,size_t depth,
ExceptionInfo *exception)
{
CacheView
*image_view,
*u_view,
*v_view;
MagickBooleanType
status;
RandomInfo
*random_info;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
u_view=AcquireVirtualCacheView(image,exception);
v_view=AcquireVirtualCacheView(image,exception);
random_info=AcquireRandomInfo();
status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
attenuate,depth,exception);
random_info=DestroyRandomInfo(random_info);
v_view=DestroyCacheView(v_view);
u_view=DestroyCacheView(u_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
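% A minimal usage sketch (draw_info, the caption text, and the angle are
% illustrative; pass a NULL caption to omit the caption area):
%
% Image *polaroid_image;
%
% polaroid_image=PolaroidImage(image,draw_info,"My caption",-5.0,
% BilinearInterpolatePixel,exception);
%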
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
const char *caption,const double angle,const PixelInterpolateMethod method,
ExceptionInfo *exception)
{
Image
*bend_image,
*caption_image,
*flop_image,
*picture_image,
*polaroid_image,
*rotate_image,
*trim_image;
size_t
height;
ssize_t
quantum;
/*
Simulate a Polaroid picture.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
image->rows)/25.0,10.0);
height=image->rows+2*quantum;
caption_image=(Image *) NULL;
if (caption != (const char *) NULL)
{
char
*text;
/*
Generate caption image.
*/
caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
if (caption_image == (Image *) NULL)
return((Image *) NULL);
text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
exception);
if (text != (char *) NULL)
{
char
geometry[MagickPathExtent];
DrawInfo
*annotate_info;
MagickBooleanType
status;
ssize_t
count;
TypeMetric
metrics;
annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
(void) CloneString(&annotate_info->text,text);
count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
&metrics,&text,exception);
status=SetImageExtent(caption_image,image->columns,(size_t)
((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
if (status == MagickFalse)
caption_image=DestroyImage(caption_image);
else
{
caption_image->background_color=image->border_color;
(void) SetImageBackgroundColor(caption_image,exception);
(void) CloneString(&annotate_info->text,text);
(void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
metrics.ascent);
if (annotate_info->gravity == UndefinedGravity)
(void) CloneString(&annotate_info->geometry,AcquireString(
geometry));
(void) AnnotateImage(caption_image,annotate_info,exception);
height+=caption_image->rows;
}
annotate_info=DestroyDrawInfo(annotate_info);
text=DestroyString(text);
}
}
picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
exception);
if (picture_image == (Image *) NULL)
{
if (caption_image != (Image *) NULL)
caption_image=DestroyImage(caption_image);
return((Image *) NULL);
}
picture_image->background_color=image->border_color;
(void) SetImageBackgroundColor(picture_image,exception);
(void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
quantum,exception);
if (caption_image != (Image *) NULL)
{
(void) CompositeImage(picture_image,caption_image,OverCompositeOp,
MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
caption_image=DestroyImage(caption_image);
}
(void) QueryColorCompliance("none",AllCompliance,
&picture_image->background_color,exception);
(void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
rotate_image=RotateImage(picture_image,90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
picture_image->columns,method,exception);
picture_image=DestroyImage(picture_image);
if (bend_image == (Image *) NULL)
return((Image *) NULL);
picture_image=bend_image;
rotate_image=RotateImage(picture_image,-90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
picture_image->background_color=image->background_color;
polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
exception);
if (polaroid_image == (Image *) NULL)
{
picture_image=DestroyImage(picture_image);
return(picture_image);
}
flop_image=FlopImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (flop_image == (Image *) NULL)
{
picture_image=DestroyImage(picture_image);
return(picture_image);
}
polaroid_image=flop_image;
(void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
picture_image=DestroyImage(picture_image);
(void) QueryColorCompliance("none",AllCompliance,
&polaroid_image->background_color,exception);
rotate_image=RotateImage(polaroid_image,angle,exception);
polaroid_image=DestroyImage(polaroid_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=rotate_image;
trim_image=TrimImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (trim_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=trim_image;
return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
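% A minimal usage sketch, using the 80% starting point suggested above:
%
% Image *sepia_image;
%
% sepia_image=SepiaToneImage(image,0.80*QuantumRange,exception);
%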
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
CacheView
*image_view,
*sepia_view;
Image
*sepia_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize sepia-toned image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sepia_image=CloneImage(image,0,0,MagickTrue,exception);
if (sepia_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
{
sepia_image=DestroyImage(sepia_image);
return((Image *) NULL);
}
/*
Tone each row of the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sepia_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
intensity,
tone;
intensity=GetPixelIntensity(image,p);
tone=intensity > threshold ? (double) QuantumRange : intensity+
(double) QuantumRange-threshold;
SetPixelRed(sepia_image,ClampToQuantum(tone),q);
tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
intensity+(double) QuantumRange-7.0*threshold/6.0;
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
tone=threshold/7.0;
if ((double) GetPixelGreen(image,q) < tone)
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
if ((double) GetPixelBlue(image,q) < tone)
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(sepia_image);
}
if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sepia_view=DestroyCacheView(sepia_view);
image_view=DestroyCacheView(image_view);
(void) NormalizeImage(sepia_image,exception);
(void) ContrastImage(sepia_image,MagickTrue,exception);
if (status == MagickFalse)
sepia_image=DestroyImage(sepia_image);
return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
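% A minimal usage sketch (the transparency, sigma, and offsets are
% illustrative):
%
% Image *shadow_image;
%
% shadow_image=ShadowImage(image,80.0,2.0,5,5,exception);
%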
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
const double sigma,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
CacheView
*image_view;
ChannelType
channel_mask;
Image
*border_image,
*clone_image,
*shadow_image;
MagickBooleanType
status;
PixelInfo
background_color;
RectangleInfo
border_info;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(clone_image,sRGBColorspace,exception);
(void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
exception);
border_info.width=(size_t) floor(2.0*sigma+0.5);
border_info.height=(size_t) floor(2.0*sigma+0.5);
border_info.x=0;
border_info.y=0;
(void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
exception);
clone_image->alpha_trait=BlendPixelTrait;
border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
clone_image=DestroyImage(clone_image);
if (border_image == (Image *) NULL)
return((Image *) NULL);
if (border_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
/*
Shadow image.
*/
status=MagickTrue;
background_color=border_image->background_color;
background_color.alpha_trait=BlendPixelTrait;
image_view=AcquireAuthenticCacheView(border_image,exception);
for (y=0; y < (ssize_t) border_image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) border_image->columns; x++)
{
if (border_image->alpha_trait != UndefinedPixelTrait)
background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
SetPixelViaPixelInfo(border_image,&background_color,q);
q+=GetPixelChannels(border_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
border_image=DestroyImage(border_image);
return((Image *) NULL);
}
channel_mask=SetImageChannelMask(border_image,AlphaChannel);
shadow_image=BlurImage(border_image,0.0,sigma,exception);
border_image=DestroyImage(border_image);
if (shadow_image == (Image *) NULL)
return((Image *) NULL);
(void) SetPixelChannelMask(shadow_image,channel_mask);
if (shadow_image->page.width == 0)
shadow_image->page.width=shadow_image->columns;
if (shadow_image->page.height == 0)
shadow_image->page.height=shadow_image->rows;
shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
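% A minimal usage sketch (a zero radius lets SketchImage() choose one; the
% sigma and angle are illustrative):
%
% Image *sketch_image;
%
% sketch_image=SketchImage(image,0.0,1.0,45.0,exception);
%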
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
const double sigma,const double angle,ExceptionInfo *exception)
{
CacheView
*random_view;
Image
*blend_image,
*blur_image,
*dodge_image,
*random_image,
*sketch_image;
MagickBooleanType
status;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Sketch image.
*/
random_image=CloneImage(image,image->columns << 1,image->rows << 1,
MagickTrue,exception);
if (random_image == (Image *) NULL)
return((Image *) NULL);
status=MagickTrue;
random_info=AcquireRandomInfoThreadSet();
random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) random_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) random_image->columns; x++)
{
double
value;
register ssize_t
i;
value=GetPseudoRandomValue(random_info[id]);
for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=ClampToQuantum(QuantumRange*value);
}
q+=GetPixelChannels(random_image);
}
if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
status=MagickFalse;
}
random_view=DestroyCacheView(random_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
{
random_image=DestroyImage(random_image);
return(random_image);
}
blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
random_image=DestroyImage(random_image);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
dodge_image=EdgeImage(blur_image,radius,exception);
blur_image=DestroyImage(blur_image);
if (dodge_image == (Image *) NULL)
return((Image *) NULL);
status=ClampImage(dodge_image,exception);
if (status != MagickFalse)
status=NormalizeImage(dodge_image,exception);
if (status != MagickFalse)
status=NegateImage(dodge_image,MagickFalse,exception);
if (status != MagickFalse)
status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
sketch_image=CloneImage(image,0,0,MagickTrue,exception);
if (sketch_image == (Image *) NULL)
{
dodge_image=DestroyImage(dodge_image);
return((Image *) NULL);
}
(void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
MagickTrue,0,0,exception);
dodge_image=DestroyImage(dodge_image);
blend_image=CloneImage(image,0,0,MagickTrue,exception);
if (blend_image == (Image *) NULL)
{
sketch_image=DestroyImage(sketch_image);
return((Image *) NULL);
}
if (blend_image->alpha_trait != BlendPixelTrait)
(void) SetImageAlpha(blend_image,TransparentAlpha,exception);
(void) SetImageArtifact(blend_image,"compose:args","20x80");
(void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
0,0,exception);
blend_image=DestroyImage(blend_image);
return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
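% A minimal usage sketch; a threshold of half QuantumRange inverts only the
% brighter half of each channel's range:
%
% (void) SolarizeImage(image,0.5*QuantumRange,exception);
%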
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
/*
Solarize colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((double) image->colormap[i].red > threshold)
image->colormap[i].red=QuantumRange-image->colormap[i].red;
if ((double) image->colormap[i].green > threshold)
image->colormap[i].green=QuantumRange-image->colormap[i].green;
if ((double) image->colormap[i].blue > threshold)
image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
}
}
/*
Solarize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((double) q[i] > threshold)
q[i]=QuantumRange-q[i];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,const Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
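% A minimal usage sketch (image and watermark are assumed caller-supplied;
% the image offset selects where hiding begins):
%
% Image *stegano_image;
%
% image->offset=0;
% stegano_image=SteganoImage(image,watermark,exception);
%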
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
| (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"
CacheView
*stegano_view,
*watermark_view;
Image
*stegano_image;
int
c;
MagickBooleanType
status;
PixelInfo
pixel;
register Quantum
*q;
register ssize_t
x;
size_t
depth,
one;
ssize_t
i,
j,
k,
y;
/*
Initialize steganographic image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(watermark != (const Image *) NULL);
assert(watermark->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
one=1UL;
stegano_image=CloneImage(image,0,0,MagickTrue,exception);
if (stegano_image == (Image *) NULL)
return((Image *) NULL);
stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
{
stegano_image=DestroyImage(stegano_image);
return((Image *) NULL);
}
/*
Hide watermark in low-order bits of image.
*/
c=0;
i=0;
j=0;
depth=stegano_image->depth;
k=stegano_image->offset;
status=MagickTrue;
watermark_view=AcquireVirtualCacheView(watermark,exception);
stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
{
for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
{
for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
{
ssize_t
offset;
(void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
exception);
offset=k/(ssize_t) stegano_image->columns;
if (offset >= (ssize_t) stegano_image->rows)
break;
q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
exception);
if (q == (Quantum *) NULL)
break;
switch (c)
{
case 0:
{
SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
case 1:
{
SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
case 2:
{
SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
}
if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
break;
c++;
if (c == 3)
c=0;
k++;
if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
k=0;
if (k == stegano_image->offset)
j++;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
(depth-i),depth);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
stegano_view=DestroyCacheView(stegano_view);
watermark_view=DestroyCacheView(watermark_view);
if (status == MagickFalse)
stegano_image=DestroyImage(stegano_image);
return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The formats of the StereoImage and StereoAnaglyphImage methods are:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
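% A minimal usage sketch (left and right are assumed caller-supplied frames
% of identical dimensions; zero offsets reduce to StereoImage()):
%
% Image *anaglyph;
%
% anaglyph=StereoAnaglyphImage(left,right,0,0,exception);
%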
*/
MagickExport Image *StereoImage(const Image *left_image,
const Image *right_image,ExceptionInfo *exception)
{
return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"
const Image
*image;
Image
*stereo_image;
MagickBooleanType
status;
ssize_t
y;
assert(left_image != (const Image *) NULL);
assert(left_image->signature == MagickCoreSignature);
if (left_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
left_image->filename);
assert(right_image != (const Image *) NULL);
assert(right_image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=left_image;
if ((left_image->columns != right_image->columns) ||
(left_image->rows != right_image->rows))
ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
/*
Initialize stereo image attributes.
*/
stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
MagickTrue,exception);
if (stereo_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
{
stereo_image=DestroyImage(stereo_image);
return((Image *) NULL);
}
(void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
/*
Copy left image to red channel and right image to blue channel.
*/
status=MagickTrue;
for (y=0; y < (ssize_t) stereo_image->rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
register Quantum
*magick_restrict r;
p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
exception);
q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
(r == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) stereo_image->columns; x++)
{
SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
GetPixelAlpha(right_image,q))/2,r);
p+=GetPixelChannels(left_image);
q+=GetPixelChannels(right_image);
r+=GetPixelChannels(stereo_image);
}
if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
break;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
stereo_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
if (status == MagickFalse)
stereo_image=DestroyImage(stereo_image);
return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
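/*
  Usage sketch (hypothetical; "image" and "exception" are assumed valid):

    Image *swirl = SwirlImage(image,90.0,UndefinedInterpolatePixel,exception);

  The swirl angle decays quadratically with distance from the center and
  vanishes at the bounding ellipse, so larger degree values wrap the central
  pixels further around.
*/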
MagickExport Image *SwirlImage(const Image *image,double degrees,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
CacheView
*canvas_view,
*interpolate_view,
*swirl_view;
double
radius;
Image
*canvas_image,
*swirl_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize swirl image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
(canvas_image->background_color.alpha != OpaqueAlpha))
(void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
if (swirl_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
swirl_image=DestroyImage(swirl_image);
return((Image *) NULL);
}
/*
Compute scaling factor.
*/
center.x=(double) canvas_image->columns/2.0;
center.y=(double) canvas_image->rows/2.0;
radius=MagickMax(center.x,center.y);
scale.x=1.0;
scale.y=1.0;
if (canvas_image->columns > canvas_image->rows)
scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
else
if (canvas_image->columns < canvas_image->rows)
scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
degrees=(double) DegreesToRadians(degrees);
/*
Swirl image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas_image,exception);
interpolate_view=AcquireVirtualCacheView(image,exception);
swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
for (y=0; y < (ssize_t) canvas_image->rows; y++)
{
double
distance;
PointInfo
delta;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas_image->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(swirl_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(swirl_image,channel,p[i],q);
}
}
else
{
double
cosine,
factor,
sine;
/*
Swirl the pixel.
*/
factor=1.0-sqrt((double) distance)/radius;
sine=sin((double) (degrees*factor*factor));
cosine=cos((double) (degrees*factor*factor));
status=InterpolatePixelChannels(canvas_image,interpolate_view,
swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
(double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
exception);
if (status == MagickFalse)
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(swirl_image);
}
if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
swirl_view=DestroyCacheView(swirl_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas_image=DestroyImage(canvas_image);
if (status == MagickFalse)
swirl_image=DestroyImage(swirl_image);
return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: the blend percentages, given as a geometry string; the values
% map to the red, green, blue, and alpha (or black for CMYK) channels.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
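/*
  The weighting function f(x)=(1-(4.0*((x-0.5)*(x-0.5)))) is 0 at x=0 and
  x=1 (pure black and white are left untouched) and 1 at x=0.5 (midtones
  receive the full color vector).  Usage sketch (hypothetical values):

    PixelInfo tint;
    GetPixelInfo(image,&tint);
    (void) QueryColorCompliance("red",AllCompliance,&tint,exception);
    Image *tinted = TintImage(image,"40",&tint,exception);
*/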
MagickExport Image *TintImage(const Image *image,const char *blend,
const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
CacheView
*image_view,
*tint_view;
double
intensity;
GeometryInfo
geometry_info;
Image
*tint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
color_vector;
MagickStatusType
flags;
ssize_t
y;
/*
Allocate tint image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
tint_image=CloneImage(image,0,0,MagickTrue,exception);
if (tint_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
{
tint_image=DestroyImage(tint_image);
return((Image *) NULL);
}
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsPixelInfoGray(tint) == MagickFalse))
(void) SetImageColorspace(tint_image,sRGBColorspace,exception);
if (blend == (const char *) NULL)
return(tint_image);
/*
Determine RGB values of the color.
*/
GetPixelInfo(image,&color_vector);
flags=ParseGeometry(blend,&geometry_info);
color_vector.red=geometry_info.rho;
color_vector.green=geometry_info.rho;
color_vector.blue=geometry_info.rho;
color_vector.alpha=(MagickRealType) OpaqueAlpha;
if ((flags & SigmaValue) != 0)
color_vector.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
color_vector.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
color_vector.alpha=geometry_info.psi;
if (image->colorspace == CMYKColorspace)
{
color_vector.black=geometry_info.rho;
if ((flags & PsiValue) != 0)
color_vector.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
color_vector.alpha=geometry_info.chi;
}
intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
/*
Tint image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,tint_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
double
weight;
GetPixelInfo(image,&pixel);
weight=QuantumScale*GetPixelRed(image,p)-0.5;
pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelGreen(image,p)-0.5;
pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelBlue(image,p)-0.5;
pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelBlack(image,p)-0.5;
pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
(1.0-(4.0*(weight*weight)));
pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
SetPixelViaPixelInfo(tint_image,&pixel,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(tint_image);
}
if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
tint_view=DestroyCacheView(tint_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
tint_image=DestroyImage(tint_image);
return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
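/*
  Usage sketch (hypothetical values): soften the image toward its background
  color at the edges, with the vignette ellipse inset 10 pixels on each axis
  and the fade blurred with sigma 10:

    Image *vignette = VignetteImage(image,0.0,10.0,10,10,exception);
*/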
MagickExport Image *VignetteImage(const Image *image,const double radius,
const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
char
ellipse[MagickPathExtent];
DrawInfo
*draw_info;
Image
*canvas,
*blur_image,
*oval_image,
*vignette_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas=CloneImage(image,0,0,MagickTrue,exception);
if (canvas == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
canvas->alpha_trait=BlendPixelTrait;
oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
exception);
if (oval_image == (Image *) NULL)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
(void) QueryColorCompliance("#000000",AllCompliance,
&oval_image->background_color,exception);
(void) SetImageBackgroundColor(oval_image,exception);
draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
(void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
exception);
(void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
"0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
image->rows/2.0-y);
draw_info->primitive=AcquireString(ellipse);
(void) DrawImage(oval_image,draw_info,exception);
draw_info=DestroyDrawInfo(draw_info);
blur_image=BlurImage(oval_image,radius,sigma,exception);
oval_image=DestroyImage(oval_image);
if (blur_image == (Image *) NULL)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
blur_image->alpha_trait=UndefinedPixelTrait;
(void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
0,0,exception);
blur_image=DestroyImage(blur_image);
vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
canvas=DestroyImage(canvas);
if (vignette_image != (Image *) NULL)
(void) TransformImageColorspace(vignette_image,image->colorspace,exception);
return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
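/*
  The vertical displacement applied at column x is
  fabs(amplitude)+amplitude*sin((2*MagickPI*x)/wave_length), which is always
  non-negative, so the output image is 2*fabs(amplitude) rows taller than
  the input.  Usage sketch (hypothetical values):

    Image *wave = WaveImage(image,25.0,150.0,UndefinedInterpolatePixel,
      exception);
*/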
MagickExport Image *WaveImage(const Image *image,const double amplitude,
const double wave_length,const PixelInterpolateMethod method,
ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
CacheView
*canvas_image_view,
*wave_view;
Image
*canvas_image,
*wave_image;
MagickBooleanType
status;
MagickOffsetType
progress;
double
*sine_map;
register ssize_t
i;
ssize_t
y;
/*
Initialize wave image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
(canvas_image->background_color.alpha != OpaqueAlpha))
(void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
(canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
if (wave_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
wave_image=DestroyImage(wave_image);
return((Image *) NULL);
}
/*
Allocate sine map.
*/
sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
sizeof(*sine_map));
if (sine_map == (double *) NULL)
{
canvas_image=DestroyImage(canvas_image);
wave_image=DestroyImage(wave_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t) wave_image->columns; i++)
sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
wave_length));
/*
Wave image.
*/
status=MagickTrue;
progress=0;
canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
wave_view=AcquireAuthenticCacheView(wave_image,exception);
(void) SetCacheViewVirtualPixelMethod(canvas_image_view,
BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
for (y=0; y < (ssize_t) wave_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) wave_image->columns; x++)
{
status=InterpolatePixelChannels(canvas_image,canvas_image_view,
wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
if (status == MagickFalse)
break;
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(wave_image);
}
if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
wave_view=DestroyCacheView(wave_view);
canvas_image_view=DestroyCacheView(canvas_image_view);
canvas_image=DestroyImage(canvas_image);
sine_map=(double *) RelinquishMagickMemory(sine_map);
if (status == MagickFalse)
wave_image=DestroyImage(wave_image);
return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive low-pass and high-pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
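/*
  Usage sketch (hypothetical values): threshold is in quantum units and
  softness is in [0,1], where 0 applies full soft thresholding and 1 leaves
  the detail coefficients unchanged:

    Image *denoised = WaveletDenoiseImage(image,0.05*QuantumRange,0.0,
      exception);
*/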
static inline void HatTransform(const float *magick_restrict pixels,
const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
const float
*magick_restrict p,
*magick_restrict q,
*magick_restrict r;
register ssize_t
i;
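/*
  One level of the a-trous ("with holes") hat transform along a line:
  kernel[i]=0.25*(2*p[i]+p[i-scale]+p[i+scale]), with the signal mirrored
  at the boundaries.  The three loops below handle the mirrored left edge,
  the interior, and the mirrored right edge.
*/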
p=pixels;
q=pixels+scale*stride;
r=pixels+scale*stride;
for (i=0; i < (ssize_t) scale; i++)
{
kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
p+=stride;
q-=stride;
r+=stride;
}
for ( ; i < (ssize_t) (extent-scale); i++)
{
kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
p+=stride;
}
q=p-scale*stride;
r=pixels+stride*(extent-2);
for ( ; i < (ssize_t) extent; i++)
{
kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
p+=stride;
q+=stride;
r-=stride;
}
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
const double threshold,const double softness,ExceptionInfo *exception)
{
CacheView
*image_view,
*noise_view;
float
*kernel,
*pixels;
Image
*noise_image;
MagickBooleanType
status;
MagickSizeType
number_pixels;
MemoryInfo
*pixels_info;
ssize_t
channel;
static const float
noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
0.0080f, 0.0044f };
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
  {
    noise_image=DestroyImage(noise_image);
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  }
pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
sizeof(*pixels));
kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
GetOpenMPMaximumThreads()*sizeof(*kernel));
if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
  {
    if (kernel != (float *) NULL)
      kernel=(float *) RelinquishMagickMemory(kernel);
    if (pixels_info != (MemoryInfo *) NULL)
      pixels_info=RelinquishVirtualMemory(pixels_info);
    noise_image=DestroyImage(noise_image);
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  }
pixels=(float *) GetVirtualMemoryBlob(pixels_info);
status=MagickTrue;
number_pixels=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
{
register ssize_t
i;
size_t
high_pass,
low_pass;
ssize_t
level,
y;
PixelChannel
pixel_channel;
PixelTrait
traits;
if (status == MagickFalse)
continue;
traits=GetPixelChannelTraits(image,(PixelChannel) channel);
if (traits == UndefinedPixelTrait)
continue;
pixel_channel=GetPixelChannelChannel(image,channel);
if ((pixel_channel != RedPixelChannel) &&
(pixel_channel != GreenPixelChannel) &&
(pixel_channel != BluePixelChannel))
continue;
/*
Copy channel from image to wavelet pixel array.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixels[i++]=(float) p[channel];
p+=GetPixelChannels(image);
}
}
/*
Low-pass filter outputs are called the approximation kernel and high-pass
filter outputs are referred to as the detail kernel.  The detail kernel
has high values in the noisy parts of the signal.
*/
high_pass=0;
for (level=0; level < 5; level++)
{
double
magnitude;
ssize_t
x,
y;
low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register float
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=kernel+id*image->columns;
q=pixels+y*image->columns;
HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
q+=low_pass;
for (x=0; x < (ssize_t) image->columns; x++)
*q++=(*p++);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
register float
*magick_restrict p,
*magick_restrict q;
register ssize_t
y;
p=kernel+id*image->rows;
q=pixels+x+low_pass;
HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
for (y=0; y < (ssize_t) image->rows; y++)
{
*q=(*p++);
q+=image->columns;
}
}
/*
Soft thresholding: each detail coefficient is compared to the threshold
magnitude; coefficients beyond +/-magnitude are shrunk toward zero by
(1.0-softness)*magnitude, and smaller ones are scaled by softness.
*/
magnitude=threshold*noise_levels[level];
for (i=0; i < (ssize_t) number_pixels; ++i)
{
pixels[high_pass+i]-=pixels[low_pass+i];
if (pixels[high_pass+i] < -magnitude)
pixels[high_pass+i]+=magnitude-softness*magnitude;
else
if (pixels[high_pass+i] > magnitude)
pixels[high_pass+i]-=magnitude-softness*magnitude;
else
pixels[high_pass+i]*=softness;
if (high_pass != 0)
pixels[i]+=pixels[high_pass+i];
}
high_pass=low_pass;
}
/*
Reconstruct image from the thresholded wavelet kernel.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
offset;
q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
offset=GetPixelChannelOffset(noise_image,pixel_channel);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
pixel;
pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
q[offset]=ClampToQuantum(pixel);
i++;
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
channel,GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
kernel=(float *) RelinquishMagickMemory(kernel);
pixels_info=RelinquishVirtualMemory(pixels_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
|
GB_binop__hypot_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__hypot_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__hypot_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__hypot_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__hypot_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__hypot_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__hypot_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__hypot_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__hypot_fp64)
// C=scalar+B GB (_bind1st__hypot_fp64)
// C=scalar+B' GB (_bind1st_tran__hypot_fp64)
// C=A+scalar GB (_bind2nd__hypot_fp64)
// C=A'+scalar GB (_bind2nd_tran__hypot_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = hypot (aij, bij)
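// Usage sketch (not part of the generated file): from user code this kernel
// is reached through the generic API with the GxB_HYPOT_FP64 binary op, e.g.
//
//      GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GxB_HYPOT_FP64, A, B,
//          NULL) ;
//
// where A, B, and C are GrB_FP64 matrices.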
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = hypot (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_HYPOT || GxB_NO_FP64 || GxB_NO_HYPOT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__hypot_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__hypot_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__hypot_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__hypot_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
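// Usage sketch (an assumption, based on the v4.x API naming): user code
// reaches this kernel through GrB_apply with a bound first scalar, e.g.
//
//      double x = 3.0 ;
//      GrB_Matrix_apply_BinaryOp1st_FP64 (C, NULL, NULL, GxB_HYPOT_FP64, x,
//          B, NULL) ;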
GrB_Info GB (_bind1st__hypot_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = hypot (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__hypot_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = hypot (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = hypot (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__hypot_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = hypot (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__hypot_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int8_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_fc64)
// op(A') function: GB (_unop_tran__identity_int8_fc64)
// C type: int8_t
// A type: GxB_FC64_t
// cast: int8_t cij = GB_cast_to_int8_t (creal (aij))
// unaryop: cij = aij
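// Usage sketch (not part of the generated file): this kernel implements
// GrB_apply with GrB_IDENTITY_INT8 on a GxB_FC64 matrix, where the typecast
// keeps the real part and casts it to int8_t, e.g.
//
//      GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_INT8, A, NULL) ;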
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_fc64)
(
int8_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
int8_t z = GB_cast_to_int8_t (creal (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
int8_t z = GB_cast_to_int8_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int8_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
w7_e2_scheduling.c | // Exploring scheduling with OpenMP
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
int main(int argc, char const *argv[]) {
int n = 10000000;
int chunk_size = 10;
printf("chunk_size = %d\n", chunk_size);
double *a = malloc(n * sizeof *a);
double *b = malloc(n * sizeof *b);
double factor = 1./n;
for (size_t i = 0; i < n; i++) {
a[i] = i*factor;
b[i] = i*factor;
}
double dotp = 0;
// Without OpenMP:
double start = omp_get_wtime();
for (size_t i = 0; i < n; i++)
dotp += a[i] * b[i];
double end = omp_get_wtime();
double tot_s = end - start; // Total serial time.
printf("Time no omp: %lf\n", tot_s);
// Using the default scheduler:
start = omp_get_wtime();
#pragma omp parallel for reduction(+ : dotp)
for (size_t i = 0; i < n; i++)
dotp += a[i] * b[i];
end = omp_get_wtime();
double tot = end - start;
printf("Time default: %lf, ", tot);
printf("Speedup: %.2lf\n", tot_s/tot);
// Using static scheduler:
start = omp_get_wtime();
#pragma omp parallel for reduction(+ : dotp) schedule(static, chunk_size)
for (size_t i = 0; i < n; i++)
dotp += a[i] * b[i];
end = omp_get_wtime();
tot = end - start;
printf("Time static: %lf, ", tot);
printf("Speedup: %.2lf\n", tot_s/tot);
// Using guided scheduler:
start = omp_get_wtime();
// You can add chunk size to the guided schedule, it acts as a minimum.
#pragma omp parallel for reduction(+ : dotp) schedule(guided)
for (size_t i = 0; i < n; i++)
dotp += a[i] * b[i];
end = omp_get_wtime();
tot = end - start;
printf("Time guided: %lf, ", end - start);
printf("Speedup: %.2lf\n", tot_s/tot);
// Using auto scheduler:
start = omp_get_wtime();
#pragma omp parallel for reduction(+ : dotp) schedule(auto)
for (size_t i = 0; i < n; i++)
dotp += a[i] * b[i];
end = omp_get_wtime();
tot = end - start;
printf("Time auto: %lf, ", end - start);
printf("Speedup: %.2lf\n", tot_s/tot);
free(a);
free(b);
return 0;
}
|
core_clag2z.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions mixed zc -> ds
*
**/
#include "core_blas.h"
#include "core_lapack.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup core_lag2
*
* Converts an m-by-n matrix from single complex precision (As) to double
* complex precision (A).
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix As.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix As.
* n >= 0.
*
* @param[in] As
* The ldas-by-n matrix in single complex precision to convert.
*
* @param[in] ldas
* The leading dimension of the matrix As.
* ldas >= max(1,m).
*
* @param[out] A
* On exit, the converted lda-by-n matrix in double complex precision.
*
* @param[in] lda
* The leading dimension of the matrix A.
* lda >= max(1,m).
*
******************************************************************************/
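/*
 * Usage sketch (hypothetical buffers): convert a 2-by-2 single complex
 * matrix, stored column-major with ldas = lda = 2, to double complex:
 *
 *     plasma_complex32_t As[4];
 *     plasma_complex64_t A[4];
 *     // ... fill As ...
 *     core_clag2z(2, 2, As, 2, A, 2);
 */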
void core_clag2z(int m, int n,
plasma_complex32_t *As, int ldas,
plasma_complex64_t *A, int lda)
{
LAPACKE_clag2z_work(LAPACK_COL_MAJOR, m, n, As, ldas, A, lda);
}
/******************************************************************************/
void core_omp_clag2z(int m, int n,
plasma_complex32_t *As, int ldas,
plasma_complex64_t *A, int lda,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(in:As[0:ldas*n]) \
depend(out:A[0:lda*n])
{
if (sequence->status == PlasmaSuccess)
core_clag2z(m, n, As, ldas, A, lda);
}
}
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <cstring>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <vector>
#include "./base.h"
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"
namespace xgboost {
// forward declare learner.
class LearnerImpl;
/*! \brief data type accepted by xgboost interface */
enum DataType {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of rows in the data */
uint64_t num_row_{0};
/*! \brief number of columns in the data */
uint64_t num_col_{0};
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0};
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_;
/*!
* \brief specified root index of each instance,
* can be used for multi task setting
*/
std::vector<bst_uint> root_index_;
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_uint> group_ptr_;
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_;
/*! \brief session-id of each instance, optional */
std::vector<uint64_t> qids_;
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_;
/*! \brief version flag, used to check version of this info */
static const int kVersion = 2;
/*! \brief version that introduced qid field */
static const int kVersionQidAdded = 2;
/*! \brief default constructor */
MetaInfo() = default;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*!
* \brief Get the root index of i-th instance.
* \param i Instance index.
* \return The pre-defined root index of i-th instance.
*/
inline unsigned GetRoot(size_t i) const {
return root_index_.size() != 0 ? root_index_[i] : 0U;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
private:
/*! \brief argsort of labels */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_uint index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief reversely compare feature values */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
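/*
 * Illustration (not part of the original header): the 2-by-3 matrix
 *
 *   [ 1 0 2 ]
 *   [ 0 3 0 ]
 *
 * is stored as offset = {0, 2, 3} and
 * data = {Entry(0,1.0f), Entry(2,2.0f), Entry(1,3.0f)};
 * row i spans data[offset[i]] .. data[offset[i+1]-1].
 */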
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<size_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
size_t base_rowid;
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size as zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return number of instance in the page */
inline size_t Size() const {
return offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
SparsePage GetTranspose(int num_columns) const {
SparsePage transpose;
common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
&transpose.data.HostVector());
const int nthread = omp_get_max_threads();
builder.InitBudget(num_columns, nthread);
long batch_size = static_cast<long>(this->Size()); // NOLINT(*)
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
builder.AddBudget(inst[j].index, tid);
}
}
builder.InitStorage();
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
builder.Push(
inst[j].index,
Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
tid);
}
}
return transpose;
}
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
inline void Push(const dmlc::RowBlock<uint32_t>& batch) {
auto& data_vec = data.HostVector();
auto& offset_vec = offset.HostVector();
data_vec.reserve(data.Size() + batch.offset[batch.size] - batch.offset[0]);
offset_vec.reserve(offset.Size() + batch.size);
CHECK(batch.index != nullptr);
for (size_t i = 0; i < batch.size; ++i) {
offset_vec.push_back(offset_vec.back() + batch.offset[i + 1] - batch.offset[i]);
}
for (size_t i = batch.offset[0]; i < batch.offset[batch.size]; ++i) {
uint32_t index = batch.index[i];
bst_float fvalue = batch.value == nullptr ? 1.0f : batch.value[i];
data_vec.emplace_back(index, fvalue);
}
CHECK_EQ(offset_vec.back(), data.Size());
}
/*!
* \brief Push a sparse page
* \param batch the row page
*/
inline void Push(const SparsePage &batch) {
auto& data_vec = data.HostVector();
auto& offset_vec = offset.HostVector();
const auto& batch_offset_vec = batch.offset.HostVector();
const auto& batch_data_vec = batch.data.HostVector();
size_t top = offset_vec.back();
data_vec.resize(top + batch.data.Size());
std::memcpy(dmlc::BeginPtr(data_vec) + top,
dmlc::BeginPtr(batch_data_vec),
sizeof(Entry) * batch.data.Size());
size_t begin = offset.Size();
offset_vec.resize(begin + batch.Size());
for (size_t i = 0; i < batch.Size(); ++i) {
offset_vec[i + begin] = top + batch_offset_vec[i + 1];
}
}
/*!
* \brief Push one instance into page
* \param inst an instance row
*/
inline void Push(const Inst &inst) {
auto& data_vec = data.HostVector();
auto& offset_vec = offset.HostVector();
offset_vec.push_back(offset_vec.back() + inst.size());
size_t begin = data_vec.size();
data_vec.resize(begin + inst.size());
if (inst.size() != 0) {
std::memcpy(dmlc::BeginPtr(data_vec) + begin, inst.data(),
sizeof(Entry) * inst.size());
}
}
};
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() {}
virtual BatchIteratorImpl* Clone() = 0;
virtual const SparsePage& operator*() const = 0;
virtual void operator++() = 0;
virtual bool AtEnd() const = 0;
};
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag;
explicit BatchIterator(BatchIteratorImpl* impl) { impl_.reset(impl); }
BatchIterator(const BatchIterator& other) {
if (other.impl_) {
impl_.reset(other.impl_->Clone());
} else {
impl_.reset();
}
}
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
const SparsePage& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::unique_ptr<BatchIteratorImpl> impl_;
};
class BatchSet {
public:
explicit BatchSet(BatchIterator begin_iter) : begin_iter_(begin_iter) {}
BatchIterator begin() { return begin_iter_; }
BatchIterator end() { return BatchIterator(nullptr); }
private:
BatchIterator begin_iter_;
};
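// Usage sketch (illustrative, not a verbatim API guarantee): a BatchSet is
// meant to be consumed with a range-based for loop, e.g.
//
//   for (const SparsePage& page : dmat->GetRowBatches()) {
//     // ... process one batch ...
//   }
//
// where dmat is a DMatrix (declared below).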
/*!
 * \brief A data structure that the user can pass to DMatrix::Create
 * to create a DMatrix for training. The user can create this data
 * structure for customized data loading on a single machine.
 *
 * In a distributed setting, a customized dmlc::Parser is usually needed instead.
 */
class DataSource : public dmlc::DataIter<SparsePage> {
public:
/*!
* \brief Meta information about the dataset
* The subclass need to be able to load this correctly from data.
*/
MetaInfo info;
};
/*!
 * \brief A vector-like structure to represent a set of rows.
 * Saves memory when all rows are in the set (the common case in XGBoost).
 */
class RowSet {
public:
/*! \return i-th row index */
inline bst_uint operator[](size_t i) const;
/*! \return the size of the set. */
inline size_t Size() const;
/*! \brief push the index back to the set */
inline void PushBack(bst_uint i);
/*! \brief clear the set */
inline void Clear();
/*!
* \brief save rowset to file.
* \param fo The file to be saved.
*/
inline void Save(dmlc::Stream* fo) const;
/*!
* \brief Load rowset from file.
* \param fi The file to be loaded.
* \return if read is successful.
*/
inline bool Load(dmlc::Stream* fi);
/*! \brief constructor */
RowSet() = default;
private:
/*! \brief The internal data structure of size */
uint64_t size_{0};
/*! \brief The internal data structure of row set if not all*/
std::vector<bst_uint> rows_;
};
/*!
 * \brief Internal data structure used by XGBoost during training.
 * There are two ways to create a customized DMatrix that reads in a user-defined format.
 *
 * - Provide a dmlc::Parser and pass it into DMatrix::Create.
 *   Alternatively, if the data can be represented by a URL, define a new dmlc::Parser and register it with DMLC_REGISTER_DATA_PARSER;
 *   this works best for user-defined data input sources, such as databases or filesystems.
 * - Provide a DataSource that can be passed to DMatrix::Create.
 *   This can be used to re-use an in-memory data structure as a DMatrix
 *   (see the usage sketch after the class definition below).
 */
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
* \brief Gets row batches. Use range based for loop over BatchSet to access individual batches.
*/
virtual BatchSet GetRowBatches() = 0;
virtual BatchSet GetSortedColumnBatches() = 0;
virtual BatchSet GetColumnBatches() = 0;
// the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns are stored in a single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief get column density */
virtual float GetColDensity(size_t cidx) = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
  /*!
   * \brief Save DMatrix to a local file.
   * The saved file only works for a non-sharded dataset (single-machine training).
   * This API is deprecated and its use is discouraged.
   * \param fname The file name to save to.
   */
virtual void SaveToLocalFile(const std::string& fname);
  /*!
   * \brief Load DMatrix from a URI.
   * \param uri The URI of the input.
   * \param silent Whether to print information during loading.
   * \param load_row_split Flag to read in only part of the rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   * By default "auto" detects the format and can also load a local binary file.
   * \return The created DMatrix.
   */
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto");
  /*!
   * \brief Create a new DMatrix by wrapping a row iterator (DataSource) and meta info.
   * \param source The source iterator of the data; the create function takes ownership of the source.
   * \param cache_prefix The path prefix of the temporary cache file of the DMatrix when used in external memory mode.
   * This can be empty (the default) for common cases, and in-memory mode will be used.
   * \return A created DMatrix.
   */
static DMatrix* Create(std::unique_ptr<DataSource>&& source,
const std::string& cache_prefix = "");
  /*!
   * \brief Create a DMatrix by loading data from a parser.
   * The parser can be deleted after the DMatrix is created.
   * \param parser The input data parser.
   * \param cache_prefix The path prefix of the temporary cache file of the DMatrix when used in external memory mode.
   * This can be empty (the default) for common cases, and in-memory mode will be used.
   * \sa dmlc::Parser
   * \note dmlc-core provides an efficient distributed data parser for the libsvm format.
   * Users can create and register a customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
   * See "dmlc-core/include/dmlc/data.h" for details.
   * \return A created DMatrix.
   */
static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
const std::string& cache_prefix = "");
};
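// A minimal usage sketch (not part of xgboost) of the DataSource creation
// path described above; MyDataSource stands for a hypothetical concrete
// subclass of DataSource:
//
//   std::unique_ptr<DataSource> src(new MyDataSource());
//   std::unique_ptr<DMatrix> dmat(DMatrix::Create(std::move(src)));
//   const MetaInfo& info = dmat->Info();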
// implementation of inline functions
inline bst_uint RowSet::operator[](size_t i) const {
return rows_.size() == 0 ? static_cast<bst_uint>(i) : rows_[i];
}
inline size_t RowSet::Size() const {
return size_;
}
inline void RowSet::Clear() {
rows_.clear(); size_ = 0;
}
inline void RowSet::PushBack(bst_uint i) {
if (rows_.size() == 0) {
if (i == size_) {
++size_; return;
} else {
rows_.resize(size_);
      for (size_t r = 0; r < size_; ++r) {
        rows_[r] = static_cast<bst_uint>(r);
      }
}
}
rows_.push_back(i);
++size_;
}
inline void RowSet::Save(dmlc::Stream* fo) const {
fo->Write(rows_);
fo->Write(&size_, sizeof(size_));
}
inline bool RowSet::Load(dmlc::Stream* fi) {
  if (!fi->Read(&rows_)) return false;
  // Save always writes rows_ followed by size_, so consume both fields;
  // otherwise size_ would be left stale and the stream mis-positioned.
  return fi->Read(&size_, sizeof(size_)) == sizeof(size_);
}
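// A minimal usage sketch (not part of xgboost) of the dense fast path in
// RowSet::PushBack above: indices pushed in order keep rows_ empty, and the
// first gap materializes the explicit index vector.  The function name is
// hypothetical and exists only for illustration.
inline void RowSetUsageSketch() {
  RowSet rs;
  rs.PushBack(0);  // size_ == 1, rows_ still empty (all-rows fast path)
  rs.PushBack(1);  // size_ == 2, rows_ still empty
  rs.PushBack(5);  // gap: rows_ becomes {0, 1}, then 5 is appended
  // now rs[2] == 5 and rs.Size() == 3
}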
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
DMLC_DECLARE_TRAITS(has_saveload, xgboost::RowSet, true);
}
#endif // XGBOOST_DATA_H_
|
GB_subassign_04.c | //------------------------------------------------------------------------------
// GB_subassign_04: C(I,J) += A ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 04: C(I,J) += A ; using S
// M: NULL
// Mask_comp: false
// C_replace: false
// accum: present
// A: matrix
// S: constructed
// C: not bitmap: use GB_bitmap_assign instead
// A: any sparsity structure.
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_04
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_BinaryOp accum,
const GrB_Matrix A,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (A) ;
GB_GET_C ; // C must not be bitmap
GB_GET_A ;
GB_GET_S ;
GB_GET_ACCUM ;
//--------------------------------------------------------------------------
// Method 04: C(I,J) += A ; using S
//--------------------------------------------------------------------------
// Time: Close to Optimal. Every entry in A must be visited, and the
// corresponding entry in S must then be found. Time for this phase is
// Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S))
// time. This method simply traverses all of A+S (like GB_add for
// computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)).
// The only difference is that the traversal of A+S can terminate if A is
// exhausted. Entries in S but not A do not actually require any work
// (unlike Method 02, which must visit all entries in A+S).
// Method 02 and Method 04 are somewhat similar. They differ on how C is
// modified when the entry is present in S but not A.
// TODO: phase2 of Method 02 and 04 are identical and could be
// done in a single function.
// Compare with Method 16, which computes C(I,J)<!M> += A, using S.
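    // Worked example (illustrative indices, not from any real matrix): with
    // S(:,j) = {0,2,5} and A(:,j) = {2,3}, the merge visits i=0 (in S only:
    // skip), i=2 (in both: apply accum), then i=3 (in A only: pending
    // insert), and stops because A is exhausted; i=5 in S is never touched.
    // Method 02 would still have to visit i=5.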
//--------------------------------------------------------------------------
// Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
// all of IxJ must be examined
GB_SUBASSIGN_IXJ_SLICE ;
}
else
{
// traverse all A+S
GB_SUBASSIGN_TWO_SLICE (A, S) ;
}
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase1: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (Sfound && !Afound)
{
// ----[C . 1] or [X . 1]-------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (!Sfound && Afound)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
task_pending++ ;
}
else if (Sfound && Afound)
{
// ----[C A 1] or [X A 1]-------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase1: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
// ----[C . 1] or [X . 1]-------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
task_pending++ ;
GB_NEXT (A) ;
}
else
{
// ----[C A 1] or [X A 1]-------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
                // ignore the remainder of S (:,j): S (:,j) is exhausted, so
                // any entries still remaining in A (:,j) become pending tuples
                task_pending += (pA_end - pA) ;
}
GB_PHASE1_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase2: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (!Sfound && Afound)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
}
else if (Sfound)
{
// S (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase2: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
GB_NEXT (A) ;
}
else
{
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
                // ignore the remainder of S (:,j): S (:,j) is exhausted, so
                // all remaining entries of A (:,j) become pending tuples
while (pA < pA_end)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iA = GBI (Ai, pA, Avlen) ;
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
GB_NEXT (A) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
GB_unaryop__abs_int64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int64_int32
// op(A') function: GB_tran__abs_int64_int32
// C type: int64_t
// A type: int32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_INT32)
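// Expansion sketch (illustrative): with the macros above, GB_CAST_OP (p, p)
// expands to
//      int32_t aij = Ax [p] ;
//      int64_t z = (int64_t) aij ;
//      Cx [p] = GB_IABS (z) ;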
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int64_int32
(
int64_t *Cx, // Cx and Ax may be aliased
int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int64_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
map.c | #include "ghost/map.h"
#include "ghost/locality.h"
#include "ghost/util.h"
#include "ghost/error.h"
ghost_error ghost_map_create(ghost_map **map, ghost_gidx gdim, ghost_mpi_comm comm, ghost_maptype type, ghost_map_flags flags)
{
ghost_error ret = GHOST_SUCCESS;
int nranks;
GHOST_CALL_GOTO(ghost_malloc((void **)map,sizeof(ghost_map)),err,ret);
GHOST_CALL_GOTO(ghost_nrank(&nranks, comm),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&((*map)->goffs),sizeof(ghost_gidx)*nranks),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&((*map)->ldim),sizeof(ghost_lidx)*nranks),err,ret);
(*map)->ref_count = 1; /* the scope or context which creates the map is the first referrer */
(*map)->gdim = gdim;
(*map)->loc_perm = NULL;
(*map)->loc_perm_inv = NULL;
(*map)->glb_perm = NULL;
(*map)->glb_perm_inv = NULL;
(*map)->cu_loc_perm = NULL;
(*map)->dim = 0;
(*map)->dimhalo = 0;
(*map)->dimpad = 0;
(*map)->offs = 0;
(*map)->nhalo = 0;
(*map)->mpicomm = comm;
(*map)->type = type;
(*map)->flags = flags;
goto out;
err:
out:
return ret;
}
static int diag(ghost_gidx row, ghost_lidx *rowlen, ghost_gidx *col, void *val, __attribute__((unused)) void *arg)
{
*rowlen = 1;
col[0] = row;
((double *)val)[0] = (double)(row+1);
return 0;
}
ghost_error ghost_map_create_distribution(ghost_map *map, ghost_sparsemat_src_rowfunc *matsrc, double weight, ghost_map_dist_type distType, ghost_lidx *el_per_rank)
{
int me,nranks,i;
ghost_error ret = GHOST_SUCCESS;
ghost_gidx row;
GHOST_CALL_GOTO(ghost_nrank(&nranks, map->mpicomm),err,ret);
GHOST_CALL_GOTO(ghost_rank(&me,map->mpicomm),err,ret);
    if (distType == GHOST_MAP_DIST_NNZ && matsrc == NULL) {
        /* the nnz-based partitioning below needs the row lengths, so a
         * matrix source is required even if el_per_rank is given */
        GHOST_ERROR_LOG("Distribution by nnz can only be done if a matrix source is given");
        return GHOST_ERR_INVALID_ARG;
    }
    if (matsrc == NULL && el_per_rank == NULL) {
        ghost_sparsemat_src_rowfunc diagsrc = GHOST_SPARSEMAT_SRC_ROWFUNC_INITIALIZER;
        diagsrc.func = diag;
        diagsrc.maxrowlen = 1;
        diagsrc.gnrows = map->gdim;
        return ghost_map_create_distribution(map,&diagsrc,weight,distType,NULL);
    }
if (distType == GHOST_MAP_DIST_NNZ) {
        ghost_gidx *rpt = NULL;
ghost_gidx gnnz;
GHOST_PERFWARNING_LOG("Dividing the matrix by number of non-zeros is not scalable as rank 0 reads in _all_ row lengths of the matrix!");
GHOST_WARNING_LOG("Will not take into account possible matrix re-ordering when dividing the matrix by number of non-zeros!");
        if (me == 0) {
            ghost_lidx target_nnz;
            /* the partitioning loop below needs the row pointers rpt in
             * either case, so build them unconditionally */
            GHOST_CALL_GOTO(ghost_malloc((void **)&rpt,sizeof(ghost_gidx)*(map->gdim+1)),err,ret);
#pragma omp parallel for schedule(runtime)
            for( row = 0; row < map->gdim+1; row++ ) {
                rpt[row] = 0;
            }
            char *tmpval = NULL;
            ghost_gidx *tmpcol = NULL;
            GHOST_CALL_GOTO(ghost_malloc((void **)&tmpval,matsrc->maxrowlen*GHOST_DT_MAX_SIZE),err,ret);
            GHOST_CALL_GOTO(ghost_malloc((void **)&tmpcol,matsrc->maxrowlen*sizeof(ghost_gidx)),err,ret);
            rpt[0] = 0;
            ghost_lidx rowlen;
            for(row = 0; row < map->gdim; row++) {
                matsrc->func(row,&rowlen,tmpcol,tmpval,matsrc->arg);
                rpt[row+1] = rpt[row]+rowlen;
            }
            free(tmpval); tmpval = NULL;
            free(tmpcol); tmpcol = NULL;
            gnnz = rpt[map->gdim];
            if (!el_per_rank) {
                target_nnz = (gnnz/nranks)+1; /* otherwise some rows would be left over! */
            } else {
                target_nnz = el_per_rank[me];
            }
map->goffs[0] = 0;
ghost_lidx j = 1;
for (row=0;row<map->gdim;row++){
                if (j < nranks && rpt[row] >= (ghost_gidx)j*target_nnz){
map->goffs[j] = row;
j = j+1;
}
}
for (i=0; i<nranks-1; i++){
map->ldim[i] = map->goffs[i+1] - map->goffs[i] ;
}
            map->ldim[nranks-1] = map->gdim - map->goffs[nranks-1] ;
            free(rpt); rpt = NULL;
}
#ifdef GHOST_HAVE_MPI
MPI_CALL_GOTO(MPI_Bcast(map->goffs, nranks, ghost_mpi_dt_gidx, 0, map->mpicomm),err,ret);
MPI_CALL_GOTO(MPI_Bcast(map->ldim, nranks, ghost_mpi_dt_lidx, 0, map->mpicomm),err,ret);
#endif
} else if (distType == GHOST_MAP_DIST_NROWS) {
ghost_lidx *target_rows = NULL;
if (!el_per_rank) {
double allweights = 1.;
#ifdef GHOST_HAVE_MPI
MPI_CALL_GOTO(MPI_Allreduce(&weight,&allweights,1,MPI_DOUBLE,MPI_SUM,map->mpicomm),err,ret);
#endif
ghost_lidx my_target_rows = (ghost_lidx)(map->gdim*((double)weight/(double)allweights));
if (my_target_rows == 0) {
GHOST_WARNING_LOG("This rank will have zero rows assigned!");
}
GHOST_CALL_GOTO(ghost_malloc((void **)&target_rows,nranks*sizeof(ghost_lidx)),err,ret);
#ifdef GHOST_HAVE_MPI
MPI_CALL_GOTO(MPI_Allgather(&my_target_rows,1,ghost_mpi_dt_lidx,target_rows,1,ghost_mpi_dt_lidx,map->mpicomm),err,ret);
#endif
} else {
target_rows = el_per_rank;
}
map->goffs[0] = 0;
for (i=1; i<nranks; i++){
map->goffs[i] = map->goffs[i-1]+target_rows[i-1];
}
for (i=0; i<nranks-1; i++){
ghost_gidx lnrows = map->goffs[i+1] - map->goffs[i];
if (lnrows > (ghost_gidx)GHOST_LIDX_MAX) {
GHOST_ERROR_LOG("Re-compile with 64-bit local indices!");
return GHOST_ERR_UNKNOWN;
}
map->ldim[i] = (ghost_lidx)lnrows;
}
ghost_gidx lnrows = map->gdim - map->goffs[nranks-1];
if (lnrows > (ghost_gidx)GHOST_LIDX_MAX) {
GHOST_ERROR_LOG("The local number of rows (%"PRGIDX") exceeds the maximum range. Re-compile with 64-bit local indices!",lnrows);
return GHOST_ERR_DATATYPE;
}
map->ldim[nranks-1] = (ghost_lidx)lnrows;
if (!el_per_rank) {
free(target_rows); target_rows = NULL;
}
}
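    /* Worked example (illustrative): gdim = 10, nranks = 3, and equal
     * weights give my_target_rows = 3 on every rank, hence goffs = {0,3,6}
     * and ldim = {3,3,4}; the last rank absorbs the remainder. */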
map->dim = map->ldim[me];
map->dimpad = map->dim; // may be increased by some padding at a later point
map->offs = map->goffs[me];
goto out;
err:
out:
return ret;
}
void ghost_map_destroy(ghost_map *map)
{
if (map) {
map->ref_count--;
if (map->ref_count < 0) {
GHOST_ERROR_LOG("Negative ref_count! This should not have happened.");
return;
}
if (map->ref_count==0)
{
free(map->goffs); map->goffs = NULL;
free(map->ldim); map->ldim = NULL;
free(map);
}
}
}
int ghost_rank_of_row(ghost_map *map, ghost_gidx row)
{
int i,nprocs;
GHOST_CALL_RETURN(ghost_nrank(&nprocs,map->mpicomm));
for (i=0; i<nprocs; i++) {
if (map->goffs[i] <= row && map->goffs[i]+map->ldim[i] > row) {
return i;
}
}
return -1;
}
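/* A minimal sketch (not part of the GHOST API) of the same owner lookup as
 * ghost_rank_of_row above, done with a binary search over the sorted goffs
 * array instead of a linear scan; useful when the number of ranks is large.
 * The function name is hypothetical. */
__attribute__((unused)) static int rank_of_row_bsearch_sketch(ghost_map *map, int nprocs, ghost_gidx row)
{
    if (row < 0 || row >= map->gdim) {
        return -1;
    }
    int lo = 0, hi = nprocs-1;
    while (lo < hi) {
        int mid = lo + (hi-lo+1)/2; /* round up so lo always advances */
        if (map->goffs[mid] <= row) {
            lo = mid;
        } else {
            hi = mid-1;
        }
    }
    return lo; /* largest i with goffs[i] <= row */
}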
ghost_map *ghost_map_create_light(ghost_lidx dim, ghost_mpi_comm mpicomm)
{
ghost_map *map;
ghost_malloc((void **)&map,sizeof(ghost_map));
map->ref_count = 0;
map->flags = GHOST_MAP_DEFAULT;
map->dim = dim;
map->gdim = dim;
map->dimpad = dim;
map->offs = 0;
map->mpicomm = mpicomm;
map->ldim = NULL;
map->goffs = NULL;
map->cu_loc_perm = NULL;
map->loc_perm = NULL;
map->loc_perm_inv = NULL;
map->glb_perm = NULL;
map->glb_perm_inv = NULL;
map->type = GHOST_MAP_NONE; //required for checking compatibility
return map;
}
|
GB_ewise_slice.c | //------------------------------------------------------------------------------
// GB_ewise_slice: slice the entries and vectors for an ewise operation
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Constructs a set of tasks to compute C, for an element-wise operation
// (GB_add, GB_emult, and GB_mask) that operates on two input matrices,
// C=op(A,B). The mask is ignored for computing where to slice the work, but
// it is sliced once the location has been found.
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (Coarse, ntasks1+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Cwork, Cnvec+1, sizeof (int64_t)) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_FREE_MEMORY (TaskList, max_ntasks+1, sizeof (GB_task_struct)) ; \
}
#include "GB.h"
//------------------------------------------------------------------------------
// GB_ewise_slice
//------------------------------------------------------------------------------
GrB_Info GB_ewise_slice
(
// output:
GB_task_struct **p_TaskList, // array of structs, of size max_ntasks
int *p_max_ntasks, // size of TaskList
int *p_ntasks, // # of tasks constructed
int *p_nthreads, // # of threads for eWise operation
// input:
const int64_t Cnvec, // # of vectors of C
const int64_t *GB_RESTRICT Ch, // vectors of C, if hypersparse
const int64_t *GB_RESTRICT C_to_M, // mapping of C to M
const int64_t *GB_RESTRICT C_to_A, // mapping of C to A
const int64_t *GB_RESTRICT C_to_B, // mapping of C to B
bool Ch_is_Mh, // if true, then Ch == Mh; GB_add only
const GrB_Matrix M, // mask matrix to slice (optional)
const GrB_Matrix A, // matrix to slice
const GrB_Matrix B, // matrix to slice
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_TaskList != NULL) ;
ASSERT (p_max_ntasks != NULL) ;
ASSERT (p_ntasks != NULL) ;
ASSERT (p_nthreads != NULL) ;
ASSERT_MATRIX_OK (A, "A for ewise_slice", GB0) ;
ASSERT_MATRIX_OK (B, "B for ewise_slice", GB0) ;
(*p_TaskList ) = NULL ;
(*p_max_ntasks) = 0 ;
(*p_ntasks ) = 0 ;
(*p_nthreads ) = 1 ;
int64_t *GB_RESTRICT Cwork = NULL ;
int64_t *GB_RESTRICT Coarse = NULL ; // size ntasks1+1
int ntasks1 = 0 ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// allocate the initial TaskList
//--------------------------------------------------------------------------
    // Allocate the TaskList to hold at least 2*ntasks0 tasks.  It will grow
// later, if needed. Usually, 64*nthreads_max is enough, but in a few cases
// fine tasks can cause this number to be exceeded. If that occurs,
// TaskList is reallocated.
// When the mask is present, it is often fastest to break the work up
// into tasks, even when nthreads_max is 1.
GB_task_struct *GB_RESTRICT TaskList = NULL ;
int max_ntasks = 0 ;
int ntasks0 = (M == NULL && nthreads_max == 1) ? 1 : (32 * nthreads_max) ;
GB_REALLOC_TASK_LIST (TaskList, ntasks0, max_ntasks) ;
//--------------------------------------------------------------------------
// check for quick return for a single task
//--------------------------------------------------------------------------
if (Cnvec == 0 || ntasks0 == 1)
{
// construct a single coarse task that computes all of C
TaskList [0].kfirst = 0 ;
TaskList [0].klast = Cnvec-1 ;
(*p_TaskList ) = TaskList ;
(*p_max_ntasks) = max_ntasks ;
(*p_ntasks ) = (Cnvec == 0) ? 0 : 1 ;
(*p_nthreads ) = 1 ;
return (GrB_SUCCESS) ;
}
//--------------------------------------------------------------------------
// get A, B, and M
//--------------------------------------------------------------------------
const int64_t vlen = A->vlen ;
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ai = A->i ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bi = B->i ;
bool Ch_is_Ah = (Ch != NULL && A->h != NULL && Ch == A->h) ;
bool Ch_is_Bh = (Ch != NULL && B->h != NULL && Ch == B->h) ;
const int64_t *GB_RESTRICT Mp = NULL ;
const int64_t *GB_RESTRICT Mi = NULL ;
if (M != NULL)
{
Mp = M->p ;
Mi = M->i ;
// Ch_is_Mh is true if either true on input (for GB_add, which denotes
// that Ch is a deep copy of M->h), or if Ch is a shallow copy of M->h.
Ch_is_Mh = Ch_is_Mh || (Ch != NULL && M->h != NULL && Ch == M->h) ;
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_MALLOC_MEMORY (Cwork, Cnvec+1, sizeof (int64_t)) ;
if (Cwork == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// compute an estimate of the work for each vector of C
//--------------------------------------------------------------------------
int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
//----------------------------------------------------------------------
// get the C(:,j) vector
//----------------------------------------------------------------------
int64_t j = (Ch == NULL) ? k : Ch [k] ;
//----------------------------------------------------------------------
// get the corresponding vector of A
//----------------------------------------------------------------------
int64_t kA ;
if (C_to_A != NULL)
{
// A is hypersparse and the C_to_A mapping has been created
ASSERT (A->is_hyper || A->is_slice) ;
kA = C_to_A [k] ;
ASSERT (kA >= -1 && kA < A->nvec) ;
if (kA >= 0)
{
ASSERT (j == ((A->is_hyper) ? A->h [kA] : (A->hfirst + kA))) ;
}
}
else if (Ch_is_Ah)
{
// A is hypersparse, but Ch is a shallow copy of A->h
kA = k ;
ASSERT (j == A->h [kA]) ;
}
else
{
// A is standard
ASSERT (!A->is_hyper) ;
ASSERT (!A->is_slice) ;
ASSERT (A->h == NULL) ;
kA = j ;
}
//----------------------------------------------------------------------
// get the corresponding vector of B
//----------------------------------------------------------------------
int64_t kB ;
if (C_to_B != NULL)
{
// B is hypersparse and the C_to_B mapping has been created
ASSERT (B->is_hyper || B->is_slice) ;
kB = C_to_B [k] ;
ASSERT (kB >= -1 && kB < B->nvec) ;
if (kB >= 0)
{
ASSERT (j == ((B->is_hyper) ? B->h [kB] : (B->hfirst + kB))) ;
}
}
else if (Ch_is_Bh)
{
// B is hypersparse, but Ch is a shallow copy of B->h
kB = k ;
ASSERT (j == B->h [kB]) ;
}
else
{
// B is standard
ASSERT (!B->is_hyper) ;
ASSERT (!B->is_slice) ;
ASSERT (B->h == NULL) ;
kB = j ;
}
//----------------------------------------------------------------------
// estimate the work for C(:,j)
//----------------------------------------------------------------------
ASSERT (kA >= -1 && kA < A->nvec) ;
ASSERT (kB >= -1 && kB < B->nvec) ;
int64_t aknz = (kA < 0) ? 0 : (Ap [kA+1] - Ap [kA]) ;
int64_t bknz = (kB < 0) ? 0 : (Bp [kB+1] - Bp [kB]) ;
Cwork [k] = aknz + bknz + 1 ;
}
//--------------------------------------------------------------------------
// replace Cwork with its cumulative sum
//--------------------------------------------------------------------------
GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork) ;
double cwork = (double) Cwork [Cnvec] ;
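    // Worked example (illustrative): per-vector work estimates [3,1,2]
    // become the cumulative sum [0,3,4,6], so Cwork [k] is the total work
    // before vector k and Cwork [Cnvec] = 6 is the overall work, used below
    // to pick the task granularity.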
//--------------------------------------------------------------------------
// determine # of threads and tasks for the eWise operation
//--------------------------------------------------------------------------
int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ;
ntasks0 = (M == NULL && nthreads == 1) ? 1 : (32 * nthreads) ;
double target_task_size = cwork / (double) (ntasks0) ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
ntasks1 = cwork / target_task_size ;
ntasks1 = GB_IMAX (ntasks1, 1) ;
//--------------------------------------------------------------------------
// slice the work into coarse tasks
//--------------------------------------------------------------------------
if (!GB_pslice (&Coarse, Cwork, Cnvec, ntasks1))
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// construct all tasks, both coarse and fine
//--------------------------------------------------------------------------
int ntasks = 0 ;
for (int t = 0 ; t < ntasks1 ; t++)
{
//----------------------------------------------------------------------
// coarse task computes C (:,k:klast)
//----------------------------------------------------------------------
int64_t k = Coarse [t] ;
int64_t klast = Coarse [t+1] - 1 ;
if (k >= Cnvec)
{
//------------------------------------------------------------------
// all tasks have been constructed
//------------------------------------------------------------------
break ;
}
else if (k < klast)
{
//------------------------------------------------------------------
// coarse task has 2 or more vectors
//------------------------------------------------------------------
// This is a non-empty coarse-grain task that does two or more
// entire vectors of C, vectors k:klast, inclusive.
GB_REALLOC_TASK_LIST (TaskList, ntasks + 1, max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = klast ;
ntasks++ ;
}
else
{
//------------------------------------------------------------------
// coarse task has 0 or 1 vectors
//------------------------------------------------------------------
// As a coarse-grain task, this task is empty or does a single
// vector, k. Vector k must be removed from the work done by this
// and any other coarse-grain task, and split into one or more
// fine-grain tasks.
for (int tt = t ; tt < ntasks1 ; tt++)
{
// remove k from the initial slice tt
if (Coarse [tt] == k)
{
// remove k from task tt
Coarse [tt] = k+1 ;
}
else
{
// break, k not in task tt
break ;
}
}
//------------------------------------------------------------------
// get the vector of C
//------------------------------------------------------------------
int64_t j = (Ch == NULL) ? k : Ch [k] ;
//------------------------------------------------------------------
// get the corresponding vector of A
//------------------------------------------------------------------
int64_t kA ;
if (C_to_A != NULL)
{
// A is hypersparse and the C_to_A mapping has been created
kA = C_to_A [k] ;
}
else if (Ch_is_Ah)
{
// A is hypersparse, but Ch is a shallow copy of A->h
kA = k ;
}
else
{
// A is standard
kA = j ;
}
int64_t pA_start = (kA < 0) ? -1 : Ap [kA] ;
int64_t pA_end = (kA < 0) ? -1 : Ap [kA+1] ;
bool a_empty = (pA_end == pA_start) ;
//------------------------------------------------------------------
// get the corresponding vector of B
//------------------------------------------------------------------
int64_t kB ;
if (C_to_B != NULL)
{
// B is hypersparse and the C_to_B mapping has been created
kB = C_to_B [k] ;
}
else if (Ch_is_Bh)
{
// B is hypersparse, but Ch is a shallow copy of B->h
kB = k ;
}
else
{
// B is standard
kB = j ;
}
int64_t pB_start = (kB < 0) ? -1 : Bp [kB] ;
int64_t pB_end = (kB < 0) ? -1 : Bp [kB+1] ;
bool b_empty = (pB_end == pB_start) ;
//------------------------------------------------------------------
// get the corresponding vector of M, if present
//------------------------------------------------------------------
int64_t pM_start = -1 ;
int64_t pM_end = -1 ;
if (M != NULL)
{
int64_t kM ;
if (C_to_M != NULL)
{
// M is hypersparse and the C_to_M mapping has been created
kM = C_to_M [k] ;
}
else if (Ch_is_Mh)
{
// Ch is a deep or shallow copy of Mh
kM = k ;
}
else
{
// M is standard
kM = j ;
}
pM_start = (kM < 0) ? -1 : Mp [kM] ;
pM_end = (kM < 0) ? -1 : Mp [kM+1] ;
}
bool m_empty = (pM_end == pM_start) ;
//------------------------------------------------------------------
// determine the # of fine-grain tasks to create for vector k
//------------------------------------------------------------------
double ckwork = Cwork [k+1] - Cwork [k] ;
int nfine = ckwork / target_task_size ;
nfine = GB_IMAX (nfine, 1) ;
// make the TaskList bigger, if needed
GB_REALLOC_TASK_LIST (TaskList, ntasks + nfine, max_ntasks) ;
//------------------------------------------------------------------
// create the fine-grain tasks
//------------------------------------------------------------------
if (nfine == 1)
{
//--------------------------------------------------------------
// this is a single coarse task for all of vector k
//--------------------------------------------------------------
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = k ;
ntasks++ ;
}
else
{
//--------------------------------------------------------------
// slice vector k into nfine fine tasks
//--------------------------------------------------------------
// first fine task starts at the top of vector k
ASSERT (ntasks < max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -1 ; // this is a fine task
TaskList [ntasks].pM = (m_empty) ? -1 : pM_start ;
TaskList [ntasks].pA = (a_empty) ? -1 : pA_start ;
TaskList [ntasks].pB = (b_empty) ? -1 : pB_start ;
TaskList [ntasks].len = 0 ; // to be determined below
ntasks++ ;
int64_t ilast = 0, i = 0 ;
for (int tfine = 1 ; tfine < nfine ; tfine++)
{
double target_work = ((nfine-tfine) * ckwork) / nfine ;
int64_t pM, pA, pB ;
GB_slice_vector (&i, &pM, &pA, &pB,
pM_start, pM_end, Mi, // Mi NULL if M not present
pA_start, pA_end, Ai, 0, // Ai always explicit list
pB_start, pB_end, Bi, // Bi always explicit list
vlen, target_work) ;
// prior task ends at pM-1, pA-1, and pB-1
TaskList [ntasks-1].pM_end = pM ;
TaskList [ntasks-1].pA_end = pA ;
TaskList [ntasks-1].pB_end = pB ;
// prior task handles indices ilast:i-1
TaskList [ntasks-1].len = i - ilast ;
// this task starts at pM, pA, and pB
ASSERT (ntasks < max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -1 ; // this is a fine task
TaskList [ntasks].pM = pM ;
TaskList [ntasks].pA = pA ;
TaskList [ntasks].pB = pB ;
// advance to the next task
ntasks++ ;
ilast = i ;
}
// Terminate the last fine task.
ASSERT (ntasks <= max_ntasks) ;
TaskList [ntasks-1].pM_end = (m_empty) ? -1 : pM_end ;
TaskList [ntasks-1].pA_end = (a_empty) ? -1 : pA_end ;
TaskList [ntasks-1].pB_end = (b_empty) ? -1 : pB_end ;
TaskList [ntasks-1].len = vlen - i ;
}
}
}
ASSERT (ntasks <= max_ntasks) ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
(*p_TaskList ) = TaskList ;
(*p_max_ntasks) = max_ntasks ;
(*p_ntasks ) = ntasks ;
(*p_nthreads ) = nthreads ;
return (GrB_SUCCESS) ;
}
|
convolution_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
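// A minimal scalar reference (a sketch, not part of ncnn) of what
// conv2x2s1_neon below computes for one output channel: a 2x2, stride-1
// convolution accumulated over all input channels, plus an optional bias.
// It assumes densely packed w*h input channels and a kernel array holding
// one 2x2 kernel per input channel; all names are illustrative.
static inline void conv2x2s1_naive_ref(const float* in, int w, int h, int inch,
                                       float* out, int outw, int outh,
                                       const float* kernel, float bias)
{
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            float sum = bias;
            for (int q = 0; q < inch; q++)
            {
                const float* img = in + q * w * h; // input channel q
                const float* k = kernel + q * 4;   // its 2x2 kernel
                sum += img[i * w + j] * k[0];
                sum += img[i * w + j + 1] * k[1];
                sum += img[(i + 1) * w + j] * k[2];
                sum += img[(i + 1) * w + j + 1] * k[3];
            }
            out[i * outw + j] = sum;
        }
    }
}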
static void conv2x2s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
int q = 0;
for (; q+1<inch; q+=2)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* img1 = bottom_blob.channel(q+1);
const float* kernel0 = kernel + p*inch*4 + q*4;
const float* kernel1 = kernel0 + 4;
const float* r00 = img0;
const float* r01 = img0 + w;
const float* r10 = img1;
const float* r11 = img1 + w;
#if __ARM_NEON
float32x4_t _k0 = vld1q_f32(kernel0);
float32x4_t _k1 = vld1q_f32(kernel1);
#endif // __ARM_NEON
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.4s}, [%2], #16 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v12.4s}, [%3], #16 \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v14.4s}, [%4], #16 \n"
"0: \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v9.4s}, [%5] \n"
"fmul v8.4s, v0.4s, %12.s[0] \n"
"fmla v9.4s, v2.4s, %12.s[2] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v1.4s}, [%1], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v3.4s}, [%2], #16 \n"
"ext v10.16b, v0.16b, v1.16b, #4 \n"
"ext v11.16b, v2.16b, v3.16b, #4 \n"
"fmla v8.4s, v12.4s, %13.s[0] \n"
"fmla v9.4s, v14.4s, %13.s[2] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v13.4s}, [%3], #16 \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v15.4s}, [%4], #16 \n"
"fmla v8.4s, v10.4s, %12.s[1] \n"
"fmla v9.4s, v11.4s, %12.s[3] \n"
"ext v10.16b, v12.16b, v13.16b, #4 \n"
"ext v11.16b, v14.16b, v15.16b, #4 \n"
"fmla v8.4s, v10.4s, %13.s[1] \n"
"fmla v9.4s, v11.4s, %13.s[3] \n"
"orr v0.16b, v1.16b, v1.16b \n"
"orr v2.16b, v3.16b, v3.16b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"orr v12.16b, v13.16b, v13.16b \n"
"orr v14.16b, v15.16b, v15.16b \n"
"subs %w0, %w0, #1 \n"
"st1 {v8.4s}, [%5], #16 \n"
"bne 0b \n"
"sub %1, %1, #16 \n"
"sub %2, %2, #16 \n"
"sub %3, %3, #16 \n"
"sub %4, %4, #16 \n"
: "=r"(nn), // %0
"=r"(r00), // %1
"=r"(r01), // %2
"=r"(r10), // %3
"=r"(r11), // %4
"=r"(outptr) // %5
: "0"(nn),
"1"(r00),
"2"(r01),
"3"(r10),
"4"(r11),
"5"(outptr),
"w"(_k0), // %12
"w"(_k1) // %13
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d4-d5}, [%2]! \n"
"pld [%3, #128] \n"
"vld1.f32 {d24-d25}, [%3]! \n"
"pld [%4, #128] \n"
"vld1.f32 {d28-d29}, [%4]! \n"
"0: \n"
"pld [%5, #128] \n"
"vld1.f32 {d18-d19}, [%5] \n"// q9 = sum
"vmul.f32 q8, q0, %e12[0] \n"
"vmla.f32 q9, q2, %f12[0] \n"
"pld [%1, #128] \n"
"vld1.f32 {d2-d3}, [%1]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d6-d7}, [%2]! \n"
"vext.f32 q10, q0, q1, #1 \n"
"vext.f32 q11, q2, q3, #1 \n"
"vmla.f32 q8, q12, %e13[0] \n"
"vmla.f32 q9, q14, %f13[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d26-d27}, [%3]! \n"
"pld [%4, #128] \n"
"vld1.f32 {d30-d31}, [%4]! \n"
"vmla.f32 q8, q10, %e12[1] \n"
"vmla.f32 q9, q11, %f12[1] \n"
"vext.f32 q10, q12, q13, #1 \n"
"vext.f32 q11, q14, q15, #1 \n"
"vmla.f32 q8, q10, %e13[1] \n"
"vmla.f32 q9, q11, %f13[1] \n"
"vorr q0, q1, q1 \n"
"vorr q2, q3, q3 \n"
"vadd.f32 q8, q8, q9 \n"
"vorr q12, q13, q13 \n"
"vorr q14, q15, q15 \n"
"subs %0, #1 \n"
"vst1.f32 {d16-d17}, [%5]! \n"
"bne 0b \n"
"sub %1, #16 \n"
"sub %2, #16 \n"
"sub %3, #16 \n"
"sub %4, #16 \n"
: "=r"(nn), // %0
"=r"(r00), // %1
"=r"(r01), // %2
"=r"(r10), // %3
"=r"(r11), // %4
"=r"(outptr) // %5
: "0"(nn),
"1"(r00),
"2"(r01),
"3"(r10),
"4"(r11),
"5"(outptr),
"w"(_k0), // %12
"w"(_k1) // %13
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x2_t _r00 = vld1_f32(r00);
float32x2_t _r01 = vld1_f32(r01);
float32x4_t _r00r1 = vcombine_f32(_r00, _r01);
float32x4_t _s0s1 = vmulq_f32(_r00r1, _k0);
float32x2_t _r10 = vld1_f32(r10);
float32x2_t _r11 = vld1_f32(r11);
float32x4_t _r10r1 = vcombine_f32(_r10, _r11);
_s0s1 = vmlaq_f32(_s0s1, _r10r1, _k1);
float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
_s = vpadd_f32(_s, _s);
*outptr += vget_lane_f32(_s, 0);
#else
float sum = 0.f;
sum += r00[0] * kernel0[0];
sum += r00[1] * kernel0[1];
sum += r01[0] * kernel0[2];
sum += r01[1] * kernel0[3];
sum += r10[0] * kernel1[0];
sum += r10[1] * kernel1[1];
sum += r11[0] * kernel1[2];
sum += r11[1] * kernel1[3];
*outptr += sum;
#endif // __ARM_NEON
r00 += 1;
r01 += 1;
r10 += 1;
r11 += 1;
outptr++;
}
r00 += 1;
r01 += 1;
r10 += 1;
r11 += 1;
}
}
for (; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*4 + q*4;
const float* r0 = img0;
const float* r1 = img0 + w;
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(kernel0[0]);
float32x4_t _k1 = vdupq_n_f32(kernel0[1]);
float32x4_t _k2 = vdupq_n_f32(kernel0[2]);
float32x4_t _k3 = vdupq_n_f32(kernel0[3]);
#endif // __ARM_NEON
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.4s}, [%2], #16 \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v9.4s}, [%3] \n"
"fmul v8.4s, v0.4s, %8.4s \n"
"fmla v9.4s, v2.4s, %10.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v1.4s}, [%1], #16 \n"
"ext v10.16b, v0.16b, v1.16b, #4 \n"
"fmla v8.4s, v10.4s, %9.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v3.4s}, [%2], #16 \n"
"ext v11.16b, v2.16b, v3.16b, #4 \n"
"fmla v9.4s, v11.4s, %11.4s \n"
"orr v0.16b, v1.16b, v1.16b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"orr v2.16b, v3.16b, v3.16b \n"
"subs %w0, %w0, #1 \n"
"st1 {v8.4s}, [%3], #16 \n"
"bne 0b \n"
"sub %1, %1, #16 \n"
"sub %2, %2, #16 \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr),
"w"(_k0), // %8
"w"(_k1), // %9
"w"(_k2), // %10
"w"(_k3) // %11
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d4-d5}, [%2]! \n"
"0: \n"
"pld [%3, #128] \n"
"vld1.f32 {d18-d19}, [%3] \n"// q9 = sum
"vmul.f32 q8, q0, %q8 \n"
"vmla.f32 q9, q2, %q10 \n"
"pld [%1, #128] \n"
"vld1.f32 {d2-d3}, [%1]! \n"
"vext.f32 q10, q0, q1, #1 \n"
"vmla.f32 q8, q10, %q9 \n"
"pld [%2, #128] \n"
"vld1.f32 {d6-d7}, [%2]! \n"
"vext.f32 q11, q2, q3, #1 \n"
"vmla.f32 q9, q11, %q11 \n"
"vorr q0, q1, q1 \n"
"vadd.f32 q8, q8, q9 \n"
"vorr q2, q3, q3 \n"
"subs %0, #1 \n"
"vst1.f32 {d16-d17}, [%3]! \n"
"bne 0b \n"
"sub %1, #16 \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr),
"w"(_k0), // %8
"w"(_k1), // %9
"w"(_k2), // %10
"w"(_k3) // %11
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
#endif
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x2_t _r0 = vld1_f32(r0);
float32x2_t _r1 = vld1_f32(r1);
float32x4_t _r0r1 = vcombine_f32(_r0, _r1);
float32x4_t _s0s1 = vmulq_f32(_r0r1, _k0123);
float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
_s = vpadd_f32(_s, _s);
*outptr += vget_lane_f32(_s, 0);
#else
float sum = 0.f;
sum += r0[0] * kernel0[0];
sum += r0[1] * kernel0[1];
sum += r1[0] * kernel0[2];
sum += r1[1] * kernel0[3];
*outptr += sum;
#endif
r0 += 1;
r1 += 1;
outptr++;
}
r0 += 1;
r1 += 1;
}
}
}
}
|
GB_binop__gt_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint64)
// A*D function (colscale): GB (_AxD__gt_uint64)
// D*A function (rowscale): GB (_DxB__gt_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint64)
// C=scalar+B GB (_bind1st__gt_uint64)
// C=scalar+B' GB (_bind1st_tran__gt_uint64)
// C=A+scalar GB (_bind2nd__gt_uint64)
// C=A'+scalar GB (_bind2nd_tran__gt_uint64)
// C type: bool
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT64 || GxB_NO_GT_UINT64)
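// Expansion sketch (illustrative): with the macros above, the generic
// template statements
//      GB_GETA (aij, Ax, p, A_iso) ;
//      GB_GETB (bij, Bx, p, B_iso) ;
//      GB_BINOP (GB_CX (p), aij, bij, i, j) ;
// expand to
//      uint64_t aij = GBX (Ax, p, A_iso) ;
//      uint64_t bij = GBX (Bx, p, B_iso) ;
//      Cx [p] = (aij > bij) ;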
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__gt_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
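// GBB (Bb,p) tests the bitmap (true when Bb is NULL or Bb [p] is set), and
// GBX (Bx,p,iso) reads Bx [p], or Bx [0] when the matrix is iso-valued.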
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "coders/coders-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
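/* PSDQuantum() rounds a byte count up to the next even value; PSD pads
odd-length sections with a filler byte. */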
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
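/*
The 4-character blend-mode keys below are returned byte-reversed when the
image endian is LSBEndian, apparently because the key is handled as a 4-byte
word whose byte order follows the writer's endianness.
*/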
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
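/*
That is, given a composited value c = a*v + (1-a)*white, the loop below
recovers v = (c-(1-a)*QuantumRange)/a for each color channel.
*/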
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if ((image->alpha_trait != BlendPixelTrait) ||
(image->colorspace != sRGBColorspace))
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image, q);
if (gamma != 0.0 && gamma != 1.0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static inline CompressionType ConvertPSDCompression(
PSDCompressionType compression)
{
switch (compression)
{
case RLE:
return RLECompression;
case ZipWithPrediction:
case ZipWithoutPrediction:
return ZipCompression;
default:
return NoCompression;
}
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
if (image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
PixelInfo
color;
ssize_t
y;
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
complete_mask=CloneImage(image,0,0,MagickTrue,exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
GetPixelInfo(complete_mask,&color);
color.red=(MagickRealType) background;
(void) SetImageColor(complete_mask,&color,exception);
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
Quantum
*p;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=(MagickRealType) GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
ExceptionInfo *exception)
{
char
*key;
RandomInfo
*random_info;
StringInfo
*key_info;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" preserving opacity mask");
random_info=AcquireRandomInfo();
key_info=GetRandomKey(random_info,8+1);
key=(char *) GetStringInfoDatum(key_info);
key[8]=(char) layer_info->mask.background;
key[9]='\0';
layer_info->mask.image->page.x+=layer_info->page.x;
layer_info->mask.image->page.y+=layer_info->page.y;
(void) SetImageRegistry(ImageRegistryType,(const char *) key,
layer_info->mask.image,exception);
(void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
(const char *) key);
key_info=DestroyStringInfo(key_info);
random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
if (packets == 0) \
return(i); \
packets--
#define CheckNumberPixels(count) \
if (((ssize_t) i + count) > (ssize_t) number_pixels) \
return(i); \
i+=count
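/*
The input is PackBits-encoded: a control byte n in 0..127 copies the next
n+1 literal bytes, n in 129..255 repeats the next byte 257-n times, and
n == 128 is a no-op.
*/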
int
pixel;
ssize_t
i,
j;
size_t
length;
ssize_t
packets;
packets=(ssize_t) number_compact_pixels;
for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
{
packets--;
length=(size_t) (*compact_pixels++);
if (length == 128)
continue;
if (length > 128)
{
length=256-length+1;
CheckNumberCompactPixels;
pixel=(*compact_pixels++);
for (j=0; j < (ssize_t) length; j++)
{
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(unsigned char) ((pixel >> 6) & 0x03);
*pixels++=(unsigned char) ((pixel >> 4) & 0x03);
*pixels++=(unsigned char) ((pixel >> 2) & 0x03);
*pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(unsigned char) ((pixel >> 4) & 0xff);
*pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(unsigned char) pixel;
break;
}
}
}
continue;
}
length++;
for (j=0; j < (ssize_t) length; j++)
{
CheckNumberCompactPixels;
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(*compact_pixels >> 6) & 0x03;
*pixels++=(*compact_pixels >> 4) & 0x03;
*pixels++=(*compact_pixels >> 2) & 0x03;
*pixels++=(*compact_pixels & 0x03) & 0x03;
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(*compact_pixels >> 4) & 0xff;
*pixels++=(*compact_pixels & 0x0f) & 0xff;
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(*compact_pixels);
break;
}
}
compact_pixels++;
}
}
return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
const ssize_t number_layers)
{
ssize_t
i;
for (i=0; i<number_layers; i++)
{
if (layer_info[i].image != (Image *) NULL)
layer_info[i].image=DestroyImage(layer_info[i].image);
if (layer_info[i].mask.image != (Image *) NULL)
layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
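/*
Bytes per sample: 2 for colormaps with more than 256 entries, 4 for depths
above 16 bits, 2 for depths above 8 bits, otherwise 1.
*/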
static inline size_t GetPSDPacketSize(const Image *image)
{
if (image->storage_class == PseudoClass)
{
if (image->colors > 256)
return(2);
}
if (image->depth > 16)
return(4);
if (image->depth > 8)
return(2);
return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
if (psd_info->version == 1)
return((MagickSizeType) ReadBlobLong(image));
return((MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
if (image->depth == 1)
return(((image->columns+7)/8)*GetPSDPacketSize(image));
else
return(image->columns*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
switch (type)
{
case BitmapMode: return "Bitmap";
case GrayscaleMode: return "Grayscale";
case IndexedMode: return "Indexed";
case RGBMode: return "RGB";
case CMYKMode: return "CMYK";
case MultichannelMode: return "Multichannel";
case DuotoneMode: return "Duotone";
case LabMode: return "L*A*B";
default: return "unknown";
}
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickBooleanType
status;
channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
AlphaChannel));
status=NegateImage(image,MagickFalse,exception);
(void) SetImageChannelMask(image,channel_mask);
return(status);
}
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
const unsigned char *blocks,size_t length)
{
const unsigned char
*p;
ssize_t
offset;
StringInfo
*profile;
unsigned char
name_length;
unsigned int
count;
unsigned short
id,
short_sans;
if (length < 16)
return((StringInfo *) NULL);
profile=BlobToStringInfo((const unsigned char *) NULL,length);
SetStringInfoDatum(profile,blocks);
SetStringInfoName(profile,"8bim");
for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
{
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p+=4;
p=PushShortPixel(MSBEndian,p,&id);
p=PushCharPixel(p,&name_length);
if ((name_length % 2) == 0)
name_length++;
p+=name_length;
if (p > (blocks+length-4))
break;
p=PushLongPixel(MSBEndian,p,&count);
offset=(ssize_t) count;
if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
break;
switch (id)
{
case 0x03ed:
{
unsigned short
resolution;
/*
Resolution info.
*/
if (offset < 16)
break;
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.x=(double) resolution;
(void) FormatImageProperty(image,"tiff:XResolution","%*g",
GetMagickPrecision(),image->resolution.x);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.y=(double) resolution;
(void) FormatImageProperty(image,"tiff:YResolution","%*g",
GetMagickPrecision(),image->resolution.y);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
image->units=PixelsPerInchResolution;
break;
}
case 0x0421:
{
if ((offset > 4) && (*(p+4) == 0))
psd_info->has_merged_image=MagickFalse;
p+=offset;
break;
}
default:
{
p+=offset;
break;
}
}
if ((offset & 0x01) != 0)
p++;
}
return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
ssize_t
count;
count=ReadBlob(image,length,(unsigned char *) p);
if ((count == (ssize_t) length) && (image->endian != MSBEndian))
{
char
*q;
q=p+length;
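/* Reverse the string in place (XOR swap) so that keys written by a
little-endian writer compare correctly. */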
for(--q; p < q; ++p, --q)
{
*p = *p ^ *q,
*q = *p ^ *q,
*p = *p ^ *q;
}
}
return(count);
}
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
PixelInfo
*color;
Quantum
index;
index=pixel;
if (packet_size == 1)
index=(Quantum) ScaleQuantumToChar(index);
index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
exception);
if (type == 0)
SetPixelIndex(image,index,q);
if ((type == 0) && (channels > 1))
return;
color=image->colormap+(ssize_t) GetPixelIndex(image,q);
if (type != 0)
color->alpha=(MagickRealType) pixel;
SetPixelViaPixelInfo(image,color,q);
return;
}
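/*
Channel type: 0..3 are the color channels and -1 is the transparency
(alpha) channel; types below -1 are layer-mask channels, which are read
into a separate grayscale mask image.
*/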
switch (type)
{
case -1:
{
SetPixelAlpha(image,pixel,q);
break;
}
case -2:
case 0:
{
SetPixelRed(image,pixel,q);
break;
}
case -3:
case 1:
{
SetPixelGreen(image,pixel,q);
break;
}
case -4:
case 2:
{
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
}
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
const unsigned char
*p;
Quantum
*q;
ssize_t
x;
size_t
packet_size;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
unsigned short
nibble;
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
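/*
1-bit data: each byte packs up to 8 pixels, most significant bit first; a
set bit is black (0) and a clear bit is white (QuantumRange).
*/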
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < (ssize_t) number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
q+=GetPixelChannels(image);
x++;
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
const ssize_t type,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
row_size;
ssize_t
count,
y;
unsigned char
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RAW");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(pixels,0,row_size*sizeof(*pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
const PSDInfo *psd_info,const size_t size)
{
MagickOffsetType
*sizes;
ssize_t
y;
sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
if(sizes != (MagickOffsetType *) NULL)
{
for (y=0; y < (ssize_t) size; y++)
{
if (psd_info->version == 1)
sizes[y]=(MagickOffsetType) ReadBlobShort(image);
else
sizes[y]=(MagickOffsetType) ReadBlobLong(image);
}
}
return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
length,
row_size;
ssize_t
count,
y;
unsigned char
*compact_pixels,
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RLE compressed");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
if ((MagickOffsetType) length < sizes[y])
length=(size_t) sizes[y];
if (length > (row_size+2048)) /* arbitrary number */
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
}
compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
if (count != (ssize_t) sizes[y])
break;
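/*
For 1-bit images a bogus depth (123456) forces DecodePSDPixels into its
default case, copying the packed bytes verbatim; the bits are expanded
later in ReadPSDChannelPixels.
*/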
count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
(ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
exception);
if (status == MagickFalse)
break;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
const size_t count,const size_t row_size)
{
unsigned char
*p;
size_t
length,
remaining;
p=pixels;
remaining=count;
while (remaining > 0)
{
length=image->columns;
while (--length)
{
*(p+1)+=*p;
p++;
}
p++;
remaining-=row_size;
}
}
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
const size_t count,const size_t row_size)
{
unsigned char
*p;
size_t
length,
remaining;
p=pixels;
remaining=count;
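/*
Undo horizontal delta prediction on big-endian 16-bit samples: add the
previous sample to the current one, propagating the carry out of the low
byte with ((p[1]+p[3]) >> 8).
*/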
while (remaining > 0)
{
length=image->columns;
while (--length)
{
p[2]+=p[0]+((p[1]+p[3]) >> 8);
p[3]+=p[1];
p+=2;
}
p+=2;
remaining-=row_size;
}
}
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
unsigned char *output_pixels,const size_t row_size)
{
unsigned char
*p,
*q;
ssize_t
y;
size_t
offset1,
offset2,
offset3,
remaining;
unsigned char
*start;
offset1=image->columns;
offset2=2*offset1;
offset3=3*offset1;
p=pixels;
q=output_pixels;
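/*
Each row stores the four bytes of every 32-bit sample in planar order (all
first bytes, then all second bytes, and so on). Undo the per-row byte
deltas, then interleave the four planes back into big-endian samples.
*/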
for (y=0; y < (ssize_t) image->rows; y++)
{
start=p;
remaining=row_size;
while (--remaining)
{
*(p+1)+=*p;
p++;
}
p=start;
remaining=image->columns;
while (remaining--)
{
*(q++)=*p;
*(q++)=*(p+offset1);
*(q++)=*(p+offset2);
*(q++)=*(p+offset3);
p++;
}
p=start+row_size;
}
}
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
const ssize_t type,const PSDCompressionType compression,
const size_t compact_size,ExceptionInfo *exception)
{
MagickBooleanType
status;
unsigned char
*p;
size_t
count,
packet_size,
row_size;
ssize_t
y;
unsigned char
*compact_pixels,
*pixels;
z_stream
stream;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is ZIP compressed");
if ((MagickSizeType) compact_size > GetBlobSize(image))
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
packet_size=GetPSDPacketSize(image);
row_size=image->columns*packet_size;
count=image->rows*row_size;
pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
stream.next_in=(Bytef *)compact_pixels;
stream.avail_in=(uInt) compact_size;
stream.next_out=(Bytef *)pixels;
stream.avail_out=(uInt) count;
if (inflateInit(&stream) == Z_OK)
{
int
ret;
while (stream.avail_out > 0)
{
ret=inflate(&stream,Z_SYNC_FLUSH);
if ((ret != Z_OK) && (ret != Z_STREAM_END))
{
(void) inflateEnd(&stream);
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(MagickFalse);
}
if (ret == Z_STREAM_END)
break;
}
(void) inflateEnd(&stream);
}
if (compression == ZipWithPrediction)
{
if (packet_size == 1)
Unpredict8Bit(image,pixels,count,row_size);
else if (packet_size == 2)
Unpredict16Bit(image,pixels,count,row_size);
else if (packet_size == 4)
{
unsigned char
*output_pixels;
output_pixels=(unsigned char *) AcquireQuantumMemory(count,
sizeof(*output_pixels));
if (output_pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,
"MemoryAllocationFailed",image->filename);
}
Unpredict32Bit(image,pixels,output_pixels,row_size);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
pixels=output_pixels;
}
}
status=MagickTrue;
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
if (status == MagickFalse)
break;
p+=row_size;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
const size_t channel,const PSDCompressionType compression,
ExceptionInfo *exception)
{
Image
*channel_image,
*mask;
MagickOffsetType
offset;
MagickBooleanType
status;
channel_image=image;
mask=(Image *) NULL;
if ((layer_info->channel_info[channel].type < -1) &&
(layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
{
const char
*option;
/*
Ignore mask that is not a user supplied layer mask, if the mask is
disabled or if the flags have unsupported values.
*/
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if ((layer_info->channel_info[channel].type != -2) ||
(layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
(IsStringTrue(option) == MagickFalse)))
{
(void) SeekBlob(image,(MagickOffsetType)
layer_info->channel_info[channel].size-2,SEEK_CUR);
return(MagickTrue);
}
mask=CloneImage(image,layer_info->mask.page.width,
layer_info->mask.page.height,MagickFalse,exception);
if (mask != (Image *) NULL)
{
(void) ResetImagePixels(mask,exception);
(void) SetImageType(mask,GrayscaleType,exception);
channel_image=mask;
}
}
offset=TellBlob(image);
status=MagickFalse;
switch(compression)
{
case Raw:
status=ReadPSDChannelRaw(channel_image,psd_info->channels,
(ssize_t) layer_info->channel_info[channel].type,exception);
break;
case RLE:
{
MagickOffsetType
*sizes;
sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ReadPSDChannelRLE(channel_image,psd_info,
(ssize_t) layer_info->channel_info[channel].type,sizes,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
}
break;
case ZipWithPrediction:
case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
status=ReadPSDChannelZip(channel_image,layer_info->channels,
(ssize_t) layer_info->channel_info[channel].type,compression,
layer_info->channel_info[channel].size-2,exception);
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (ZLIB)",image->filename);
#endif
break;
default:
(void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
"CompressionNotSupported","'%.20g'",(double) compression);
break;
}
(void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
SEEK_SET);
if (status == MagickFalse)
{
if (mask != (Image *) NULL)
(void) DestroyImage(mask);
ThrowBinaryException(CoderError,"UnableToDecompressImage",
image->filename);
}
if (mask != (Image *) NULL)
{
if (layer_info->mask.image != (Image *) NULL)
layer_info->mask.image=DestroyImage(layer_info->mask.image);
layer_info->mask.image=mask;
}
return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
char
message[MagickPathExtent];
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
j;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" setting up new layer image");
if (psd_info->mode != IndexedMode)
(void) SetImageBackgroundColor(layer_info->image,exception);
layer_info->image->compose=PSDBlendModeToCompositeOperator(
layer_info->blendkey);
if (layer_info->visible == MagickFalse)
layer_info->image->compose=NoCompositeOp;
/*
Set up some hidden attributes for folks that need them.
*/
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.x);
(void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.y);
(void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
layer_info->opacity);
(void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
(void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
exception);
status=MagickTrue;
for (j=0; j < (ssize_t) layer_info->channels; j++)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for channel %.20g",(double) j);
compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
layer_info->image->compression=ConvertPSDCompression(compression);
if (layer_info->channel_info[j].type == -1)
layer_info->image->alpha_trait=BlendPixelTrait;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
(size_t) j,compression,exception);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
MagickFalse,exception);
if ((status != MagickFalse) &&
(layer_info->image->colorspace == CMYKColorspace))
status=NegateCMYK(layer_info->image,exception);
if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
{
const char
*option;
layer_info->mask.image->page.x=layer_info->mask.page.x;
layer_info->mask.image->page.y=layer_info->mask.page.y;
/* Do not composite the mask when it is disabled */
if ((layer_info->mask.flags & 0x02) == 0x02)
layer_info->mask.image->compose=NoCompositeOp;
else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
exception);
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if (IsStringTrue(option) != MagickFalse)
PreservePSDOpacityMask(image,layer_info,exception);
layer_info->mask.image=DestroyImage(layer_info->mask.image);
}
return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
LayerInfo *layer_info)
{
int
channel_type;
ssize_t
i;
if (layer_info->channels < psd_info->min_channels)
return(MagickFalse);
channel_type=RedChannel;
if (psd_info->min_channels >= 3)
channel_type|=(GreenChannel | BlueChannel);
if (psd_info->min_channels >= 4)
channel_type|=BlackChannel;
for (i=0; i < (ssize_t) layer_info->channels; i++)
{
short
type;
type=layer_info->channel_info[i].type;
if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
return(MagickFalse);
if (type == -1)
{
channel_type|=AlphaChannel;
continue;
}
if (type < -1)
continue;
if (type == 0)
channel_type&=~RedChannel;
else if (type == 1)
channel_type&=~GreenChannel;
else if (type == 2)
channel_type&=~BlueChannel;
else if (type == 3)
channel_type&=~BlackChannel;
}
if (channel_type == 0)
return(MagickTrue);
if ((channel_type == AlphaChannel) &&
(layer_info->channels >= psd_info->min_channels + 1))
return(MagickTrue);
return(MagickFalse);
}
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
ssize_t number_layers)
{
ssize_t
i;
ssize_t
j;
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers == 0)
{
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
return;
}
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
const ImageInfo *image_info,const size_t index)
{
if (psd_info->has_merged_image == MagickFalse)
return(MagickFalse);
if (image_info->number_scenes == 0)
return(MagickFalse);
if (index < image_info->scene)
return(MagickTrue);
if (index > image_info->scene+image_info->number_scenes-1)
return(MagickTrue);
return(MagickFalse);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
/*
The number of layers cannot be used to determine if the merged image
contains an alpha channel. So we enable it when we think we should.
*/
if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) ||
((psd_info->mode == RGBMode) && (psd_info->channels > 3)) ||
((psd_info->mode == CMYKMode) && (psd_info->channels > 4)))
image->alpha_trait=BlendPixelTrait;
}
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
char
key[5];
size_t
remaining_length;
unsigned char
*p;
unsigned int
size;
p=GetStringInfoDatum(layer_info->info);
remaining_length=GetStringInfoLength(layer_info->info);
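/*
Each additional-info block is a 4-byte signature, a 4-character key, and a
4-byte big-endian length followed by the data; the 'luni' key carries the
UTF-16 layer name.
*/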
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(char) (*p++);
key[1]=(char) (*p++);
key[2]=(char) (*p++);
key[3]=(char) (*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
if ((size_t) size > remaining_length)
break;
if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
{
unsigned char
*name;
unsigned int
length;
length=(unsigned int) (*p++) << 24;
length|=(unsigned int) (*p++) << 16;
length|=(unsigned int) (*p++) << 8;
length|=(unsigned int) (*p++);
if (length * 2 > size - 4)
break;
if (sizeof(layer_info->name) <= length)
break;
name=layer_info->name;
while (length > 0)
{
/* Only ASCII strings are supported */
if (*p++ != '\0')
break;
*name++=*p++;
length--;
}
if (length == 0)
*name='\0';
break;
}
else
p+=size;
remaining_length-=(size_t) size;
}
}
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
char
type[4];
MagickSizeType
size;
ssize_t
count;
size=GetPSDSize(psd_info,image);
if (size != 0)
return(size);
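/*
A zero size means the layer info is stored as tagged blocks: an optional
merged-transparency block (Mt16/Mt32/Mtrn) may precede the Lr16/Lr32 block
that carries the actual layer-info size for 16- and 32-bit files.
*/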
(void) ReadBlobLong(image);
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
return(0);
count=ReadPSDString(image,type,4);
if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
(LocaleNCompare(type,"Mt32",4) == 0) ||
(LocaleNCompare(type,"Mtrn",4) == 0)))
{
size=GetPSDSize(psd_info,image);
if (size != 0)
return(0);
image->alpha_trait=BlendPixelTrait;
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
return(0);
count=ReadPSDString(image,type,4);
}
if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
(LocaleNCompare(type,"Lr32",4) == 0)))
size=GetPSDSize(psd_info,image);
return(size);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
ssize_t
i;
ssize_t
count,
index,
j,
number_layers;
size=GetLayerInfoSize(psd_info,image);
if (size == 0)
{
CheckMergedImageAlpha(psd_info,image);
return(MagickTrue);
}
layer_info=(LayerInfo *) NULL;
number_layers=(ssize_t) ReadBlobSignedShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->alpha_trait=BlendPixelTrait;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
for (i=0; i < number_layers; i++)
{
ssize_t
top,
left,
bottom,
right;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
top=(ssize_t) ReadBlobSignedLong(image);
left=(ssize_t) ReadBlobSignedLong(image);
bottom=(ssize_t) ReadBlobSignedLong(image);
right=(ssize_t) ReadBlobSignedLong(image);
if ((right < left) || (bottom < top))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].page.y=top;
layer_info[i].page.x=left;
layer_info[i].page.width=(size_t) (right-left);
layer_info[i].page.height=(size_t) (bottom-top);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
if ((layer_info[i].channel_info[j].type < -4) ||
(layer_info[i].channel_info[j].type > 4))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
image->filename);
}
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadPSDString(image,layer_info[i].blendkey,4);
if (count != 4)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t)
(ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (
ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double)
layer_info[i].mask.page.width,(double)
layer_info[i].mask.page.height,(double) ((MagickOffsetType)
length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
ParseAdditionalInfo(&layer_info[i]);
}
}
}
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info,exception);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping != MagickFalse)
{
AttachPSDLayers(image,layer_info,number_layers);
return(MagickTrue);
}
status=MagickTrue;
index=0;
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].image == (Image *) NULL) ||
(PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
{
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
(MagickSizeType) number_layers);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
AttachPSDLayers(image,layer_info,number_layers);
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD");
if (status == MagickFalse)
return(MagickTrue);
return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
MagickOffsetType
*sizes;
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
i;
if ((image_info->number_scenes != 0) && (image_info->scene != 0))
return(MagickTrue);
compression=(PSDCompressionType) ReadBlobMSBShort(image);
image->compression=ConvertPSDCompression(compression);
if (compression != Raw && compression != RLE)
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
return(MagickFalse);
}
sizes=(MagickOffsetType *) NULL;
if (compression == RLE)
{
sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
status=MagickTrue;
for (i=0; i < (ssize_t) psd_info->channels; i++)
{
ssize_t
type;
type=i;
if ((type == 1) && (psd_info->channels == 2))
type=-1;
if (compression == RLE)
status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
exception);
else
status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
if (status != MagickFalse)
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
psd_info->channels);
if (status == MagickFalse)
break;
}
if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
status=NegateCMYK(image,exception);
if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
image_list_length;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
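/*
The 26-byte header read below is laid out, all big-endian, as: a 4-byte
"8BPS" signature, a 2-byte version (1 = PSD, 2 = PSB), 6 reserved bytes,
a 2-byte channel count, 4-byte rows and columns, a 2-byte depth, and a
2-byte color mode.
*/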
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
psd_info.min_channels=3;
switch (psd_info.mode)
{
case LabMode:
{
(void) SetImageColorspace(image,LabColorspace,exception);
break;
}
case CMYKMode:
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
break;
}
case BitmapMode:
case GrayscaleMode:
case DuotoneMode:
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
break;
}
case IndexedMode:
{
psd_info.min_channels=1;
break;
}
case MultichannelMode:
{
if ((psd_info.channels > 0) && (psd_info.channels < 3))
{
psd_info.min_channels=psd_info.channels;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
break;
}
}
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read the PSD raster colormap (only present for indexed and duotone images).
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data (its format is undocumented) or 32 bits per pixel;
in both cases the colormap is skipped.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
psd_info.has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(psd_info.has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
image_list_length=GetImageListLength(image);
if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
image_info,image,&psd_info,exception);
if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
image_list_length=GetImageListLength(image);
}
if (psd_info.has_merged_image == MagickFalse)
{
Image
*merged;
if (image_list_length == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
if (merged == (Image *) NULL)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
const size_t offset)
{
if (psd_info->version == 1)
return(WriteBlobMSBShort(image,(unsigned short) offset));
return(WriteBlobMSBLong(image,(unsigned int) offset));
}
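/*
PSD (version 1) stores these offsets, such as RLE row byte counts, as
16-bit values; PSB (version 2) widens them to 32 bits, hence the version
switch here and in WritePSDOffset below.
*/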
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickOffsetType offset)
{
MagickOffsetType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
(void) SeekBlob(image,offset,SEEK_SET);
if (psd_info->version == 1)
result=WriteBlobMSBShort(image,(unsigned short) size);
else
result=WriteBlobMSBLong(image,(unsigned int) size);
(void) SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size)
{
if (psd_info->version == 1)
return(WriteBlobLong(image,(unsigned int) size));
return(WriteBlobLongLong(image,size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickOffsetType offset)
{
MagickOffsetType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
(void) SeekBlob(image,offset,SEEK_SET);
result=SetPSDSize(psd_info,image,size);
(void) SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
const unsigned char *pixels,unsigned char *compact_pixels,
ExceptionInfo *exception)
{
int
count;
ssize_t
i,
j;
unsigned char
*q;
unsigned char
*packbits;
/*
Compress pixels with Packbits encoding.
*/
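/*
PackBits is a byte-oriented RLE scheme: a control byte n in [0,127] is
followed by n+1 literal bytes, a control byte n in [129,255] repeats the
next byte 257-n times, and 128 marks end-of-data. A worked example with
illustrative input bytes:

input : AA AA AA AA 01 02 03
output: FD AA 02 01 02 03 80

0xFD = 257-4 encodes the run of four AA bytes, 0x02 introduces the three
literals, and 0x80 terminates the stream.
*/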
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
assert(compact_pixels != (unsigned char *) NULL);
packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
if (packbits == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
q=compact_pixels;
for (i=(ssize_t) length; i != 0; )
{
switch (i)
{
case 1:
{
i--;
*q++=(unsigned char) 0;
*q++=(*pixels);
break;
}
case 2:
{
i-=2;
*q++=(unsigned char) 1;
*q++=(*pixels);
*q++=pixels[1];
break;
}
case 3:
{
i-=3;
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
*q++=(unsigned char) ((256-3)+1);
*q++=(*pixels);
break;
}
*q++=(unsigned char) 2;
*q++=(*pixels);
*q++=pixels[1];
*q++=pixels[2];
break;
}
default:
{
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/*
Packed run.
*/
count=3;
while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
{
count++;
if (count >= 127)
break;
}
i-=count;
*q++=(unsigned char) ((256-count)+1);
*q++=(*pixels);
pixels+=count;
break;
}
/*
Literal run.
*/
count=0;
while ((*(pixels+count) != *(pixels+count+1)) ||
(*(pixels+count+1) != *(pixels+count+2)))
{
packbits[count+1]=pixels[count];
count++;
if (((ssize_t) count >= (i-3)) || (count >= 127))
break;
}
i-=count;
*packbits=(unsigned char) (count-1);
for (j=0; j <= (ssize_t) count; j++)
*q++=packbits[j];
pixels+=count;
break;
}
}
}
*q++=(unsigned char) 128; /* EOD marker */
packbits=(unsigned char *) RelinquishMagickMemory(packbits);
return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
const Image *next_image,const CompressionType compression,
const ssize_t channels)
{
size_t
length;
ssize_t
i,
y;
if (compression == RLECompression)
{
length=(size_t) WriteBlobShort(image,RLE);
for (i=0; i < channels; i++)
for (y=0; y < (ssize_t) next_image->rows; y++)
length+=SetPSDOffset(psd_info,image,0);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (compression == ZipCompression)
length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
else
length=(size_t) WriteBlobShort(image,Raw);
return(length);
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate,
const CompressionType compression,ExceptionInfo *exception)
{
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
const Quantum
*p;
ssize_t
i;
size_t
count,
length;
ssize_t
y;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
}
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(
MagickMinBufferExtent,sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
if ((image_info->quality > 0 && image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
exception);
count+=WriteBlob(image,length,compact_pixels);
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (compression == ZipCompression)
{
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) MagickMinBufferExtent;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) MagickMinBufferExtent-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
ExceptionInfo *exception)
{
size_t
packet_size;
unsigned char
*compact_pixels;
packet_size=image->depth > 8UL ? 2UL : 1UL;
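/*
PackBits can expand incompressible input slightly (one control byte per
literal group or run), so (9*columns)+1 packets is a comfortably generous
worst-case bound for one encoded row.
*/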
compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
image->columns)+1,packet_size*sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
}
return(compact_pixels);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
MagickOffsetType size_offset,const MagickBooleanType separate,
ExceptionInfo *exception)
{
CompressionType
compression;
Image
*mask;
MagickOffsetType
rows_offset;
size_t
channels,
count,
length,
offset_length;
unsigned char
*compact_pixels;
count=0;
offset_length=0;
rows_offset=0;
compact_pixels=(unsigned char *) NULL;
compression=next_image->compression;
if (image_info->compression != UndefinedCompression)
compression=image_info->compression;
if (compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(next_image,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
channels=1;
if (separate == MagickFalse)
{
if ((next_image->storage_class != PseudoClass) ||
(IsImageGray(next_image) != MagickFalse))
{
if (IsImageGray(next_image) == MagickFalse)
channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
3);
if (next_image->alpha_trait != UndefinedPixelTrait)
channels++;
}
rows_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,
(ssize_t) channels);
offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
}
size_offset+=2;
if ((next_image->storage_class == PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
IndexQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (IsImageGray(next_image) != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
GrayQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
length=WritePSDChannel(psd_info,image_info,image,next_image,
RedQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
GreenQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlueQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
if (next_image->colorspace == CMYKColorspace)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlackQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
if (next_image->alpha_trait != UndefinedPixelTrait)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
if (separate != MagickFalse)
{
const char
*property;
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
exception);
if (mask != (Image *) NULL)
{
if (compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(mask,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
length=WritePSDChannel(psd_info,image_info,image,mask,
RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
exception);
(void) WritePSDSize(psd_info,image,length,size_offset);
count+=length;
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
}
}
}
return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
size_t
count,
length;
ssize_t
i;
/*
Max length is 255.
*/
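/*
A Pascal string is a length byte followed by the bytes themselves, and PSD
pads the total (length byte included) to a multiple of `padding`: for
example, "Layer" occupies 1+5 bytes and gains 2 pad bytes when padding is 4.
*/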
count=0;
length=(strlen(value) > 255UL ) ? 255UL : strlen(value);
if (length == 0)
count+=WriteBlobByte(image,0);
else
{
count+=WriteBlobByte(image,(unsigned char) length);
count+=WriteBlob(image,length,(const unsigned char *) value);
}
length++;
if ((length % padding) == 0)
return(count);
for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
count+=WriteBlobByte(image,0);
return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
double
x_resolution,
y_resolution;
unsigned short
units;
if (image->units == PixelsPerCentimeterResolution)
{
x_resolution=2.54*65536.0*image->resolution.x+0.5;
y_resolution=2.54*65536.0*image->resolution.y+0.5;
units=2;
}
else
{
x_resolution=65536.0*image->resolution.x+0.5;
y_resolution=65536.0*image->resolution.y+0.5;
units=1;
}
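/*
The 0x03ED resolution resource stores density as 16.16 fixed point, so an
illustrative 72 DPI is written as 72*65536 = 0x00480000.
*/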
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x03ED);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,16); /* resource size */
(void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
(void) WriteBlobMSBShort(image,units); /* width unit */
(void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
(void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
const signed short channel)
{
size_t
count;
count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
count+=SetPSDSize(psd_info,image,0);
return(count);
}
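/*
An 8BIM image-resource block is laid out as: a 4-byte "8BIM" signature, a
2-byte resource id (0x040F = ICC profile, 0x03ED = resolution), a 2-byte
empty padded name, a 4-byte size, and the data padded to an even length.
The two Remove* helpers below walk this layout and splice blocks out.
*/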
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
unsigned char
*q;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
if ((q+quantum < (datum+length-16)))
(void) memmove(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
unsigned char
*q;
ssize_t
cnt;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
((ssize_t) length-(cnt+12)-(q-datum)) > 0)
{
(void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
remaining_length=length;
length=0;
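/*
Each additional-info record is a 4-byte signature ("8BIM"), a 4-byte key
(e.g. "luni" for a Unicode layer name), a 4-byte big-endian payload size,
and then the payload; 12 header bytes precede every payload, which is why
the loop below requires at least 12 remaining bytes.
*/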
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(char) (*p++);
key[1]=(char) (*p++);
key[2]=(char) (*p++);
key[3]=(char) (*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
if (remaining_length > 0)
p=(unsigned char *) memmove(p-12,p+size,remaining_length);
continue;
}
length+=(size_t) size+12;
p+=size;
}
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
SetStringInfoLength(profile,(const size_t) length);
(void) SetImageProfile(image,"psd:additional-info",info,exception);
return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
ExceptionInfo *exception)
{
char
layer_name[MagickPathExtent];
const char
*property;
const StringInfo
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
rounded_size,
size;
status=MagickTrue;
base_image=GetNextImageInList(image);
if (base_image == (Image *) NULL)
base_image=image;
size=0;
size_offset=TellBlob(image);
(void) SetPSDSize(psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
if (image->alpha_trait != UndefinedPixelTrait)
size+=WriteBlobShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
}
size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
next_image->rows));
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
next_image->columns));
channels=1;
if ((next_image->storage_class != PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
3);
total_channels=channels;
if (next_image->alpha_trait != UndefinedPixelTrait)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobShort(image,total_channels);
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(psd_info,image,(signed short) i);
if (next_image->alpha_trait != UndefinedPixelTrait)
size+=WriteChannelSize(psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(psd_info,image,-2);
size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,(const unsigned char)
(next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image,exception);
property=(const char *) GetImageProperty(next_image,"label",exception);
if (property == (const char *) NULL)
{
(void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobLong(image,20);
size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
mask->page.y));
size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
mask->page.x));
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,(const unsigned char)
(mask->compose == NoCompositeOp ? 2 : 0));
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),
GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=WritePSDChannels(psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue,exception);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
/*
Write the total size
*/
if (layers_size != (size_t*) NULL)
*layers_size=size;
if ((size % 2) != 0)
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
(void) DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD");
if (status == MagickFalse)
return(MagickTrue);
return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
exception);
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const StringInfo
*icc_profile;
ImageType
type;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
length,
num_channels;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
type=IdentifyImageCoderType(image,exception);
(void) type;
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) &&
(image_info->type != TrueColorAlphaType) &&
(image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
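/*
Resulting channel counts before the alpha increment: grayscale and indexed
images write 1, RGB writes 3, and CMYK writes 4; each gains one channel
when alpha is present.
*/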
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].red)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].green)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].blue)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03ED resolution resource */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
if (status != MagickFalse)
{
const char
*option;
MagickOffsetType
size_offset;
size_t
size;
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
option=GetImageOption(image_info,"psd:write-layers");
if (IsStringFalse(option) != MagickTrue)
{
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
(void) WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
}
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
compression=image->compression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
// Grid sizes and the step count come from the command line; the zero
// defaults keep every loop inert if the arguments are missing.
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2;
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information; a sentinel element (-1) marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 16;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0; // both time buffers start defined
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
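// The update below is a 25-point (1 center plus 6 axis neighbors at each of
// 4 distances) radius-4 stencil: coef0 weights the center and coef1..coef4
// weight the neighbors at distances 1 through 4, a wave-equation-style
// second-order update in time.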
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
GB_unop__minv_int64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int64_int64)
// op(A') function: GB (_unop_tran__minv_int64_int64)
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
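// GB_IMINV_SIGNED is the integer analogue of 1/x: integer division gives
// minv(1)=1, minv(-1)=-1, and 0 for any other nonzero x, with x==0 handled
// as a special case (behavioral sketch; GB.h holds the authoritative
// definition).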
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 64) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_int64_int64)
(
int64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 64) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_int64_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
conv_dw_hcl_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "convolution_param.h"
#include "conv_dw_kernel_x86.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left, int8_t v)
{
int8_t* ptr = input;
int8_t* outptr = output;
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < out_w; x++)
{
outptr[x] = v;
}
outptr += out_w;
}
// fill center
for (; y < (top + in_h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = v;
}
if (in_w < 12)
{
for (; x < (left + in_w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, in_w * sizeof(int8_t));
x += in_w;
}
for (; x < out_w; x++)
{
outptr[x] = v;
}
ptr += in_w;
outptr += out_w;
}
// fill bottom
for (; y < out_h; y++)
{
int x = 0;
for (; x < out_w; x++)
{
outptr[x] = v;
}
outptr += out_w;
}
}
static int convdw3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
int inch = input_tensor->dims[1];
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int in_hw = inh * inw;
int outch = output_tensor->dims[1];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
int out_hw = outh * outw;
int out_size = output_tensor->elem_num;
int pad_w = param->pad_w0;
int pad_h = param->pad_h0;
int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
memset(output_int32, 0, out_size * sizeof(int32_t));
float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));
int8_t* output_int8 = (int8_t*)output_tensor->data;
int8_t* input_int8 = (int8_t*)input_tensor->data;
int32_t* bias_int32 = NULL;
if(bias_tensor)
bias_int32 = (int32_t*)bias_tensor->data;
/* get scale values of quantization */
float input_scale = input_tensor->scale;
float* kernel_scales = weight_tensor->scale_list;
float output_scale = output_tensor->scale;
const signed char* kernel = (const signed char*)weight_tensor->data;
/* padding */
int inh_tmp = inh + pad_h + pad_h;
int inw_tmp = inw + pad_w + pad_w;
int8_t* input_tmp = NULL;
if (inh_tmp == inh && inw_tmp == inw)
input_tmp = input_int8;
else
{
input_tmp = ( int8_t* )sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < inch; g++)
{
int8_t* pad_in = input_int8 + g * inh * inw;
int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
}
}
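// 3x3 stride-1 depthwise kernel: each thread owns one channel p
// (depthwise, so channel == group). r0/r1/r2 walk three consecutive
// padded input rows; the nine multiply-accumulates below produce one
// output pixel with int32 accumulation. At the end of each output row
// the pointers skip the 2-column window overhang (inw_tmp - outw == 2
// for a 3x3 stride-1 window).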
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outch; p++)
{
int32_t* out0 = output_int32 + p * out_hw;
int8_t* kernel0 = (int8_t* )kernel + p * 9;
int* outptr0 = out0;
int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp;
int8_t* r0 = img0;
int8_t* r1 = img0 + inw_tmp;
int8_t* r2 = img0 + inw_tmp * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += ( int )r0[0] * kernel0[0];
sum0 += ( int )r0[1] * kernel0[1];
sum0 += ( int )r0[2] * kernel0[2];
sum0 += ( int )r1[0] * kernel0[3];
sum0 += ( int )r1[1] * kernel0[4];
sum0 += ( int )r1[2] * kernel0[5];
sum0 += ( int )r2[0] * kernel0[6];
sum0 += ( int )r2[1] * kernel0[7];
sum0 += ( int )r2[2] * kernel0[8];
*outptr0 += sum0;
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
/* process bias and dequant output from int32 to fp32 */
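// Per-channel dequantization: with real ~= q * scale, the int32
// accumulator maps back to fp32 via input_scale * kernel_scales[i].
// The int32 bias is added before scaling, which assumes the bias was
// quantized with the same combined input x kernel scale (the usual
// convention for int8 convolutions).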
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (bias_tensor)
output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
else
output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
}
}
/* process activation relu */
if (param->activation == 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
}
}
}
/* process activation relu6 */
if (param->activation > 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
if (output_fp32[output_off] > 6)
output_fp32[output_off] = 6;
}
}
}
/* quant from fp32 to int8 */
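// Symmetric requantization: q = round(x / output_scale), clamped to
// [-127, 127]; -128 is excluded, likely to keep the int8 range symmetric.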
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
if (data_i32 > 127)
data_i32 = 127;
else if (data_i32 < -127)
data_i32 = -127;
output_int8[output_off] = (int8_t)data_i32;
}
}
sys_free(output_int32);
sys_free(output_fp32);
if (!(inh_tmp == inh && inw_tmp == inw))
sys_free(input_tmp);
return 0;
}
static int convdw3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
int inch = input_tensor->dims[1];
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int in_hw = inh * inw;
int outch = output_tensor->dims[1];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
int out_hw = outh * outw;
int out_size = output_tensor->elem_num;
int pad_w = param->pad_w0;
int pad_h = param->pad_h0;
int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
memset(output_int32, 0, out_size * sizeof(int32_t));
float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));
int8_t* output_int8 = (int8_t*)output_tensor->data;
int8_t* input_int8 = (int8_t*)input_tensor->data;
int32_t* bias_int32 = NULL;
if(bias_tensor)
bias_int32 = (int32_t*)bias_tensor->data;
/* get scale values of quantization */
float input_scale = input_tensor->scale;
float* kernel_scales = weight_tensor->scale_list;
float output_scale = output_tensor->scale;
const signed char* kernel = (const signed char*)weight_tensor->data;
/* padding */
int inh_tmp = inh + pad_h + pad_h;
int inw_tmp = inw + pad_w + pad_w;
int8_t* input_tmp = NULL;
if (inh_tmp == inh && inw_tmp == inw)
input_tmp = input_int8;
else
{
input_tmp = ( int8_t* )sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < inch; g++)
{
int8_t* pad_in = input_int8 + g * inh * inw;
int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
}
}
int tailstep = inw_tmp - 2 * outw + inw_tmp;
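// After emitting outw outputs, each row pointer has advanced by
// 2 * outw (horizontal stride 2); adding tailstep = 2 * inw_tmp - 2 * outw
// moves it two full input rows down, matching the vertical stride of 2.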
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outch; p++)
{
int32_t* out0 = output_int32 + p * out_hw;
int8_t* kernel0 = (int8_t* )kernel + p * 9;
int* outptr0 = out0;
int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp;
int8_t* r0 = img0;
int8_t* r1 = img0 + inw_tmp;
int8_t* r2 = img0 + inw_tmp * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += ( int )r0[0] * kernel0[0];
sum0 += ( int )r0[1] * kernel0[1];
sum0 += ( int )r0[2] * kernel0[2];
sum0 += ( int )r1[0] * kernel0[3];
sum0 += ( int )r1[1] * kernel0[4];
sum0 += ( int )r1[2] * kernel0[5];
sum0 += ( int )r2[0] * kernel0[6];
sum0 += ( int )r2[1] * kernel0[7];
sum0 += ( int )r2[2] * kernel0[8];
*outptr0 += sum0;
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
/* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (bias_tensor)
output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
else
output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
}
}
/* process activation relu */
if (param->activation == 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
}
}
}
/* process activation relu6 */
if (param->activation > 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
if (output_fp32[output_off] > 6)
output_fp32[output_off] = 6;
}
}
}
/* quant from fp32 to int8 */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
if (data_i32 > 127)
data_i32 = 127;
else if (data_i32 < -127)
data_i32 = -127;
output_int8[output_off] = (int8_t)data_i32;
}
}
sys_free(output_int32);
sys_free(output_fp32);
if (!(inh_tmp == inh && inw_tmp == inw))
sys_free(input_tmp);
return 0;
}
static int conv_dw_run_int8(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
int ret = -1;
switch(param->stride_h)
{
case 1:
ret = convdw3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread);
break;
case 2:
ret = convdw3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread);
break;
default:
TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", param->stride_h);
}
return ret;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct tensor* weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
struct tensor* bias_tensor = NULL;
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
int num_thread = exec_graph->num_thread;
int cpu_affinity = exec_graph->cpu_affinity;
/* set the input data and shape again, in case of reshape or dynamic shape */
if (ir_node->input_num > 2)
bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
struct conv_param* conv_param = ( struct conv_param* )ir_node->op.param_mem;
struct conv_priv_info* conv_priv_info = ( struct conv_priv_info* )exec_node->ops_priv;
int ret = -1;
if (exec_graph->mode == TENGINE_MODE_FP32)
ret = conv_dw_run(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_priv_info, conv_param, num_thread, cpu_affinity);
else if (exec_graph->mode == TENGINE_MODE_INT8)
ret = conv_dw_run_int8(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);
else
{
TLOG_ERR("hcl conv run failed\n");
return -1;
}
return ret;
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
struct conv_param* param = ( struct conv_param* )exec_node->op.param_mem;
struct node* ir_node = exec_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
int group = param->group;
int kernel_h = param->kernel_h;
int kernel_w = param->kernel_w;
int stride_h = param->stride_h;
int stride_w = param->stride_w;
int dilation_h = param->dilation_h;
int dilation_w = param->dilation_w;
int pad_h0 = param->pad_h0;
int pad_w0 = param->pad_w0;
int pad_h1 = param->pad_h1;
int pad_w1 = param->pad_w1;
int in_c = input_tensor->dims[1] / group;
int out_c = output_tensor->dims[1] / group;
/* TODO: support uint8 */
if (!(input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_INT8))
return 0;
if (kernel_h != kernel_w || input_tensor->dims[0] > 1)
return 0;
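// A grouped conv with one input and one output channel per group is
// depthwise; this kernel additionally requires symmetric padding, no
// dilation, a 3x3 kernel, and stride 1 or 2.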
if (param->group > 1 && in_c == 1 && out_c == 1 && pad_h0 == pad_h1 && pad_w0 == pad_w1 && dilation_h == 1 && dilation_w == 1 && kernel_h == 3 && kernel_w == 3 &&
((stride_h == 1 && stride_w == 1) || (stride_h == 2 && stride_w == 2)))
return OPS_SCORE_BEST;
else
return 0;
}
static struct node_ops hcl_node_ops = {.prerun = NULL,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
int register_conv_dw_hcl_x86_op()
{
return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}
int unregister_conv_dw_hcl_x86_op()
{
unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
return 0;
}
|
xcfun_itrf.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*
* xcfun Library from
* https://github.com/dftlibs/xcfun
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <XCFun/xcfun.h>
#include "config.h"
static int eval_xc(xcfun_t* fun, int deriv, xcfun_vars vars,
int np, int ncol, double *rho, double *output)
{
xcfun_eval_setup(fun, vars, XC_PARTIAL_DERIVATIVES, deriv);
assert(ncol == xcfun_input_length(fun));
int outlen = xcfun_output_length(fun);
//xcfun_eval_vec(fun, np, rho, ncol, output, outlen);
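// xcfun_eval evaluates one grid point per call; the points are
// independent, so the loop parallelizes over them instead of using the
// commented-out xcfun_eval_vec call above.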
#pragma omp parallel default(none) \
shared(fun, rho, output, np, ncol, outlen)
{
int i;
#pragma omp for nowait schedule(static)
for (i=0; i < np; i++) {
xcfun_eval(fun, rho+i*ncol, output+i*outlen);
}
}
return outlen;
}
void XCFUN_eval_xc(int nfn, int *fn_id, double *fac, double *omega,
int spin, int deriv, int np,
double *rho_u, double *rho_d, double *output)
{
int i, outlen;
double *rho;
double *gxu, *gyu, *gzu, *gxd, *gyd, *gzd, *tau_u, *tau_d;
const char *name;
assert(xcfun_is_compatible_library() == true);
xcfun_t* fun = xcfun_new();
for (i = 0; i < nfn; i++) {
name = xcfun_enumerate_parameters(fn_id[i]);
xcfun_set(fun, name, fac[i]);
if (omega[i] != 0) {
xcfun_set(fun, "RANGESEP_MU", omega[i]);
}
//xcfun_set(fun, "CAM_ALPHA", val);
//xcfun_set(fun, "CAM_BETA", val);
}
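// The *_GNN / *_GAA_GAB_GBB variable sets take contracted gradients
// (sigma = grad(rho_a) . grad(rho_b)) rather than Cartesian components,
// so the gx/gy/gz slices stored in rho_u/rho_d are contracted below
// before the functional is evaluated.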
if (spin == 0) {
if (xcfun_is_metagga(fun)) {
rho = malloc(sizeof(double) * np*3);
gxu = rho_u + np;
gyu = rho_u + np * 2;
gzu = rho_u + np * 3;
tau_u = rho_u + np * 5;
for (i = 0; i < np; i++) {
rho[i*3+0] = rho_u[i];
rho[i*3+1] = gxu[i]*gxu[i] + gyu[i]*gyu[i] + gzu[i]*gzu[i];
rho[i*3+2] = tau_u[i];
}
outlen = eval_xc(fun, deriv, XC_N_GNN_TAUN, np, 3, rho, output);
free(rho);
} else if (xcfun_is_gga(fun)) {
rho = malloc(sizeof(double) * np*2);
gxu = rho_u + np;
gyu = rho_u + np * 2;
gzu = rho_u + np * 3;
for (i = 0; i < np; i++) {
rho[i*2+0] = rho_u[i];
rho[i*2+1] = gxu[i]*gxu[i] + gyu[i]*gyu[i] + gzu[i]*gzu[i];
}
outlen = eval_xc(fun, deriv, XC_N_GNN, np, 2, rho, output);
free(rho);
} else { // LDA
rho = rho_u;
outlen = eval_xc(fun, deriv, XC_N, np, 1, rho, output);
}
// xcfun computes rho*Exc[rho] for the zeroth-order derivative instead of Exc[rho]
for (i = 0; i < np; i++) {
output[i*outlen] /= rho_u[i] + 1e-150;
}
} else {
if (xcfun_is_metagga(fun)) {
rho = malloc(sizeof(double) * np*7);
gxu = rho_u + np;
gyu = rho_u + np * 2;
gzu = rho_u + np * 3;
gxd = rho_d + np;
gyd = rho_d + np * 2;
gzd = rho_d + np * 3;
tau_u = rho_u + np * 5;
tau_d = rho_d + np * 5;
for (i = 0; i < np; i++) {
rho[i*7+0] = rho_u[i];
rho[i*7+1] = rho_d[i];
rho[i*7+2] = gxu[i]*gxu[i] + gyu[i]*gyu[i] + gzu[i]*gzu[i];
rho[i*7+3] = gxu[i]*gxd[i] + gyu[i]*gyd[i] + gzu[i]*gzd[i];
rho[i*7+4] = gxd[i]*gxd[i] + gyd[i]*gyd[i] + gzd[i]*gzd[i];
rho[i*7+5] = tau_u[i];
rho[i*7+6] = tau_d[i];
}
outlen = eval_xc(fun, deriv, XC_A_B_GAA_GAB_GBB_TAUA_TAUB, np, 7, rho, output);
free(rho);
} else if (xcfun_is_gga(fun)) {
rho = malloc(sizeof(double) * np*5);
gxu = rho_u + np;
gyu = rho_u + np * 2;
gzu = rho_u + np * 3;
gxd = rho_d + np;
gyd = rho_d + np * 2;
gzd = rho_d + np * 3;
for (i = 0; i < np; i++) {
rho[i*5+0] = rho_u[i];
rho[i*5+1] = rho_d[i];
rho[i*5+2] = gxu[i]*gxu[i] + gyu[i]*gyu[i] + gzu[i]*gzu[i];
rho[i*5+3] = gxu[i]*gxd[i] + gyu[i]*gyd[i] + gzu[i]*gzd[i];
rho[i*5+4] = gxd[i]*gxd[i] + gyd[i]*gyd[i] + gzd[i]*gzd[i];
}
outlen = eval_xc(fun, deriv, XC_A_B_GAA_GAB_GBB, np, 5, rho, output);
free(rho);
} else { // LDA
rho = malloc(sizeof(double) * np*2);
for (i = 0; i < np; i++) {
rho[i*2+0] = rho_u[i];
rho[i*2+1] = rho_d[i];
}
outlen = eval_xc(fun, deriv, XC_A_B, np, 2, rho, output);
free(rho);
}
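// As in the closed-shell branch above: convert the zeroth-order
// output from rho*Exc[rho] to Exc[rho] per grid point.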
for (i = 0; i < np; i++) {
output[i*outlen] /= rho_u[i] + rho_d[i] + 1e-150;
}
}
xcfun_delete(fun);
}
/*
* XC_LDA 0 // Local density
* XC_GGA 1 // Local density & gradient
* XC_MGGA 2 // Local density, gradient and kinetic energy density
*/
int XCFUN_xc_type(int fn_id)
{
xcfun_t* fun = xcfun_new();
const char *name = xcfun_enumerate_parameters(fn_id);
xcfun_set(fun, name, 1.);
int type = 0;
if (xcfun_is_metagga(fun)) {
type = 2;
} else if (xcfun_is_gga(fun)) {
type = 1;
}
xcfun_delete(fun);
return type;
}
|
GB_unop__identity_uint64_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_int8)
// op(A') function: GB (_unop_tran__identity_uint64_int8)
// C type: uint64_t
// A type: int8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint64_int8)
(
uint64_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint64_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__remainder_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__remainder_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__remainder_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__remainder_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__remainder_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__remainder_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__remainder_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__remainder_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__remainder_fp64)
// C=scalar+B GB (_bind1st__remainder_fp64)
// C=scalar+B' GB (_bind1st_tran__remainder_fp64)
// C=A+scalar GB (_bind2nd__remainder_fp64)
// C=A'+scalar GB (_bind2nd_tran__remainder_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = remainder (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = remainder (x, y) ;
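// Note: this is the C99 remainder(), the IEEE-754 remainder with a
// round-to-nearest quotient (the result can be negative), not fmod.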
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_REMAINDER || GxB_NO_FP64 || GxB_NO_REMAINDER_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__remainder_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__remainder_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__remainder_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__remainder_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__remainder_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
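// GBB (Bb, p) is true when entry p is present (Bb == NULL denotes a
// full matrix); GBX (Bx, p, iso) reads Bx [0] when iso is true, else Bx [p].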
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = remainder (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__remainder_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = remainder (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = remainder (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__remainder_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = remainder (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__remainder_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
complex-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -O1" } */
/* PR middle-end/30143 */
int f (int n)
{
int i;
_Complex float t;
#pragma omp parallel
for (i = 1; i < n - 1; ++i)
t+=1;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
// HLSL Change Starts
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant
namespace hlsl {
struct UnusualAnnotation;
}
// HLSL Change Ends
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPClause;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
// We are about to link these. It is now safe to compute the linkage of
// the new decl. If the new decl has external linkage, we will
// link it with the hidden decl (which also has external linkage) and
// it will keep having external linkage. If it has internal linkage, we
// will not link it. Since it has no previous decls, it will remain
// with internal linkage.
if (getLangOpts().ModulesHideInternalLinkage)
return isVisible(Old) || New->isExternallyVisible();
return true;
}
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
// HLSL Change Begin
// The HLSL rewriter doesn't define a default matrix pack,
// so we must preserve the lack of annotations to avoid changing semantics.
bool HasDefaultMatrixPack = false;
// Uses of #pragma pack_matrix change the default pack.
bool DefaultMatrixPackRowMajor = false;
// HLSL Change End.
enum PragmaVtorDispKind {
PVDK_Push, ///< #pragma vtordisp(push, mode)
PVDK_Set, ///< #pragma vtordisp(mode)
PVDK_Pop, ///< #pragma vtordisp(pop)
PVDK_Reset ///< #pragma vtordisp()
};
enum PragmaMsStackAction {
PSK_Reset, // #pragma ()
PSK_Set, // #pragma ("name")
PSK_Push, // #pragma (push[, id])
PSK_Push_Set, // #pragma (push[, id], "name")
PSK_Pop, // #pragma (pop[, id])
PSK_Pop_Set, // #pragma (pop[, id], "name")
};
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
///
/// The stack always has at least one element in it.
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
Slot(llvm::StringRef StackSlotLabel,
ValueType Value,
SourceLocation PragmaLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
explicit PragmaStack(const ValueType &Value)
: CurrentValue(Value) {}
SmallVector<Slot, 2> Stack;
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// \brief The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
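/// On construction this pushes \p ContextToPush (and, unless
/// NewThisContext is false, clears the CXXThis override); pop() or the
/// destructor restores the previous context and delayed-diagnostic state.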
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC)
{
S.PushFunctionScope();
S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
}
~SynthesizedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
// std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// \brief counter for internal MS Asm label names.
unsigned MSAsmLabelNameCounter;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
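/// Example (editor's sketch, not part of the original header): the operand
/// of sizeof is parsed in an Unevaluated context, so nothing in it is
/// odr-used or code-generated; assuming a Sema &S:
/// \code
///   S.PushExpressionEvaluationContext(Sema::Unevaluated);
///   // ... parse the sizeof operand ...
///   S.PopExpressionEvaluationContext();
/// \endcode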
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
bool ParentNeedsCleanups;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// \brief The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
bool ParentNeedsCleanups,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == Unevaluated || Context == UnevaluatedAbstract;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
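/// Example (editor's sketch, not part of the original header): inspecting a
/// cached result, assuming a SpecialMemberOverloadResult *Result:
/// \code
///   if (Result->getKind() == Sema::SpecialMemberOverloadResult::Success) {
///     CXXMethodDecl *MD = Result->getMethod(); // the selected member
///     (void)MD;
///   }
/// \endcode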
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Maps each parameter to the location of the beginning of its unparsed
// default argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema& S)
: S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
~FPContractStateRAII() {
S.FPFeatures.fp_contract = OldFPContractState;
}
private:
Sema& S;
bool OldFPContractState : 1;
};
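/// Example (editor's sketch, not part of the original header): how Sema can
/// use FPContractStateRAII internally around a compound statement:
/// \code
///   {
///     FPContractStateRAII FPState(S);
///     // ... '#pragma STDC FP_CONTRACT' may change S.FPFeatures here ...
///   } // destructor restores the previous fp_contract state
/// \endcode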
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// \brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
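/// Example (editor's sketch, not part of the original header): arguments are
/// streamed into the returned builder and emitted when it is destroyed;
/// 'diag::err_example' is a hypothetical diagnostic ID:
/// \code
///   S.Diag(Loc, diag::err_example) << SomeQualType << SomeSourceRange;
/// \endcode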
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const {
if (FunctionScopes.empty())
return nullptr;
for (int e = FunctionScopes.size()-1; e >= 0; --e) {
if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
continue;
return FunctionScopes[e];
}
return nullptr;
}
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
if (!isUnevaluatedContext())
getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda scope info, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
unsigned deduceWeakPropertyFromType(QualType T) {
if ((getLangOpts().getGC() != LangOptions::NonGC &&
T.isObjCGCWeak()) ||
(getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_Weak))
return ObjCDeclSpec::DQ_PR_weak;
return 0;
}
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
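/// Example (editor's sketch, not part of the original header): building the
/// prototype 'void (int)', assuming a Sema &S and a SourceLocation Loc:
/// \code
///   SmallVector<QualType, 1> Params;
///   Params.push_back(S.Context.IntTy);
///   FunctionProtoType::ExtProtoInfo EPI;
///   QualType FnTy = S.BuildFunctionType(S.Context.VoidTy, Params, Loc,
///                                       DeclarationName(), EPI);
/// \endcode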
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = nullptr,
bool *MissingEmptyExceptionSpecification = nullptr,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
bool Suppressed;
TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
if (Suppressed)
return;
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
VisibleModuleSet VisibleModules;
llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;
Module *CachedFakeTopLevelModule;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
bool hasVisibleMergedDefinition(NamedDecl *Def);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
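/// Example (editor's sketch, not part of the original header): the variadic
/// overload forwards extra arguments into the diagnostic;
/// 'diag::err_incomplete_x' is a hypothetical diagnostic ID:
/// \code
///   if (S.RequireCompleteType(Loc, T, diag::err_incomplete_x, SomeRange))
///     return ExprError(); // diagnostic already emitted
/// \endcode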
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it appeared in an
/// evaluated context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// \brief For compatibility with MSVC, we delay parsing of some default
/// template type arguments until instantiation time. Emits a warning and
/// returns a synthesized DependentNameType that isn't really dependent on any
/// other template arguments.
ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
SourceLocation NameLoc);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *)
: Kind(NC_Keyword) {
}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
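/// Example (editor's sketch, not part of the original header): how a
/// classification for a class template could be built and queried, assuming
/// a TemplateName TN:
/// \code
///   NameClassification NC = NameClassification::TypeTemplate(TN);
///   assert(NC.getTemplateNameKind() == TNK_Type_template);
/// \endcode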
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
// HLSL Change Starts
// This enumeration is used to determine whether a variable declaration
// should shadow a prior declaration rather than merging.
enum ShadowMergeState {
ShadowMergeState_Disallowed, // shadowing is not allowed
ShadowMergeState_Possible, // shadowing is possible (but may not occur)
ShadowMergeState_Effective // the declaration should shadow a prior one
};
// HLSL Change Ends
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SCm, hlsl::ParameterModifier ParamMod); // HLSL Change
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition =
nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation = false);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
struct SkipBodyInfo {
SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
bool ShouldSkip;
NamedDecl *Previous;
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool Override,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override
};
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and the current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
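// For illustration, a hedged sketch of a concrete diagnoser, modeled loosely
// on the switch-condition conversion. Assume the code lives inside a Sema
// member function (so Sema's nested types are in scope); the diag::... IDs
// are illustrative stand-ins for entries in DiagnosticSemaKinds.td.
//
//   class SwitchConvertDiagnoser : public ICEConvertDiagnoser {
//   public:
//     SwitchConvertDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/true,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_typecheck_statement_requires_integer)
//              << T;
//     }
//     SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
//                                              QualType T) override {
//       return S.Diag(Loc, diag::err_switch_incomplete_class_type) << T;
//     }
//     SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
//                                                QualType T,
//                                                QualType ConvTy) override {
//       return S.Diag(Loc, diag::err_switch_explicit_conversion) << T << ConvTy;
//     }
//     SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
//                                            QualType ConvTy) override {
//       return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
//              << ConvTy;
//     }
//     SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
//                                             QualType T) override {
//       return S.Diag(Loc, diag::err_switch_multiple_conversions) << T;
//     }
//     SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
//                                         QualType ConvTy) override {
//       return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
//              << ConvTy;
//     }
//     SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
//                                              QualType T,
//                                              QualType ConvTy) override {
//       llvm_unreachable("conversion functions are permitted");
//     }
//   } Diagnoser;
//
//   ExprResult Converted =
//       PerformContextualImplicitConversion(CondLoc, CondExpr, Diagnoser);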
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
// Emit as a series of 'note's all templates and non-templates
// identified by the expression Expr
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
const SourceRange& OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
BEF_begin,
BEF_end
};
ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
SourceLocation RangeLoc,
VarDecl *Decl,
BeginEndFunction BEF,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
ParmVarDecl *const *ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};
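// For illustration, a minimal unqualified-lookup sketch (assuming a Sema &S,
// a Scope *CurScope, an IdentifierInfo *II, and a SourceLocation Loc):
//
//   LookupResult R(S, DeclarationName(II), Loc, Sema::LookupOrdinaryName);
//   if (S.LookupName(R, CurScope) && R.isSingleResult()) {
//     NamedDecl *ND = R.getFoundDecl();
//     // ... use ND ...
//   }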
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
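// For illustration, the source-level operator forms these results correspond
// to (the string-template form is the N3599 extension):
//
//   unsigned operator "" _c(unsigned long long);        // LOLR_Cooked
//   int operator "" _r(const char *);                   // LOLR_Raw
//   template <char...> int operator "" _t();            // LOLR_Template
//   template <typename T, T...> int operator "" _s();   // LOLR_StringTemplate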
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
DeclAccessPair Operator,
QualType T1, QualType T2);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
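// For illustration, a minimal sketch of the Filter parameter (assuming a
// Sema &S and an Expr *E): accept a rebuilt expression only when it has
// pointer type, retrying other correction combinations otherwise.
//
//   ExprResult Checked = S.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *E) -> ExprResult {
//         return E->getType()->isPointerType() ? ExprResult(E) : ExprError();
//       });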
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and the property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
bool *isOverridingProperty,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property must have either both or neither of its
/// setter and getter user-declared.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, warning each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief - Returns instance or factory methods in the global method pool for
/// the given selector. If no such method is found, or only one is, the
/// function returns false; otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool instance);
bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R,
bool receiverIdOrClass);
void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// \brief - Returns the method which best matches the given argument list,
/// or nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance);
/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
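// For illustration (assuming a Sema &S): the RAII form keeps the
// Start/Finish calls balanced across early returns.
//
//   {
//     Sema::CompoundScopeRAII CompoundScope(S);
//     // ... act on the statements of the compound body ...
//   } // ActOnFinishOfCompoundStmt runs here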
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
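// For illustration (assuming a Sema &S): the scope is popped automatically
// on error paths, and disable() is called once ownership passes elsewhere.
//
//   Sema::FunctionScopeRAII FnScope(S);
//   if (SomethingWentWrong)      // hypothetical error check
//     return ExprError();        // FnScope pops the function scope here
//   FnScope.disable();           // success: the scope stays pushed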
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };
void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
bool makeUnavailableInSystemHeader(SourceLocation loc,
StringRef message);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
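// For illustration, a capturability query that performs no capture and emits
// no diagnostics (assuming a Sema &S, a VarDecl *Var, and a SourceLocation
// Loc):
//
//   QualType CaptureType, DeclRefType;
//   bool Failed = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);
//   if (!Failed) {
//     // Var can be captured; DeclRefType is the type a reference to it
//     // would have in the current scope.
//   }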
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
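// For illustration, a hedged sketch; diag::err_expected_callable is a
// hypothetical diagnostic ID, not a real entry in DiagnosticSemaKinds.td
// (assuming a Sema &S and an ExprResult E):
//
//   static bool IsPlausibleNonVoid(QualType T) { return !T->isVoidType(); }
//   ...
//   if (S.tryToRecoverWithCall(E, S.PDiag(diag::err_expected_callable),
//                              /*ForceComplain=*/false, &IsPlausibleNonVoid))
//     return E; // a recovery call was built or an error was emitted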
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
const SourceRange &ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
// HLSL Change Begins
bool CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation Loc,
UnaryExprOrTypeTrait ExprKind);
// HLSL Change Ends
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
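// For instance, given (Handle and Widget are hypothetical types):
//
//   struct Handle { Widget *operator->(); };
//   Handle H;
//   H.Size;  // if 'Size' is not a member of Handle, the access is retried
//            // as H->Size using the arguments captured above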
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension (e.g. "x ?: y").
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
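// Illustrative only: for __builtin_offsetof(T, a.b[123].c) the parser
// builds four components:
//   .a    -> isBrackets == false, U.IdentInfo names 'a'
//   .b    -> isBrackets == false, U.IdentInfo names 'b'
//   [123] -> isBrackets == true,  U.E is the index expression
//   .c    -> isBrackets == false, U.IdentInfo names 'c'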
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
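/// Illustrative only: the Microsoft extension being classified is
/// \code
///   __if_exists(N::Name)     { /* parsed if lookup finds N::Name */ }
///   __if_not_exists(N::Name) { /* parsed if lookup finds nothing */ }
/// \endcode
/// Inside a template, a dependent name yields IER_Dependent and the check
/// is repeated at instantiation.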
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
// HLSL Change Starts
//===---------------------------- HLSL Features -------------------------===//
/// cbuffer/tbuffer
llvm::SmallVector<Decl*, 1> HLSLBuffers;
Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc,
IdentifierInfo *Ident, SourceLocation IdentLoc,
std::vector<hlsl::UnusualAnnotation *>& BufferAttributes,
SourceLocation LBrace);
void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
Decl* getActiveHLSLBuffer() const;
void ActOnStartHLSLBufferView();
bool IsOnHLSLBufferView();
Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc,
DeclGroupPtrTy &dcl, bool iscbuf);
// HLSL Change Ends
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
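/// A minimal usage sketch for the two helpers above (illustrative; 'T' is
/// hypothetical):
/// \code
///   QualType Element;
///   if (isStdInitializerList(T, &Element))
///     QualType Rebuilt = BuildStdInitializerList(Element, Loc);
/// \endcode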
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_ComputedNoexcept;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
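// A minimal usage sketch (illustrative; 'Loc' and 'Callee' are
// hypothetical), as used from within a Sema member function:
//
//   ImplicitExceptionSpecification Spec(*this);
//   Spec.CalledDecl(Loc, Callee);   // integrate one callee's spec
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();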
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
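// A minimal usage sketch (illustrative; 'Method' is hypothetical): allow
// 'this' while processing a declaration outside the member body.
//
//   {
//     CXXThisScopeRAII ThisScope(*this, Method->getParent(),
//                                Method->getTypeQualifiers());
//     // ... 'this' is usable here with the given qualifiers ...
//   } // previous CXXThisTypeOverride restored on scope exit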
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, MultiExprArg Args,
DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Param1,
QualType Param2 = QualType(),
bool addRestrictAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
DeclarationName Name);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
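/// A minimal save/restore pairing sketch (illustrative; 'AnnRange' is
/// hypothetical):
/// \code
///   void *Ann = SaveNestedNameSpecifierAnnotation(SS);
///   // ... stash Ann in an annotation token; later, while reparsing ...
///   CXXScopeSpec Restored;
///   RestoreNestedNameSpecifierAnnotation(Ann, AnnRange, Restored);
/// \endcode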
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
QualType performLambdaInitCaptureInitialization(SourceLocation Loc,
bool ByRef, IdentifierInfo *Id, Expr *&Init);
/// \brief Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType, IdentifierInfo *Id, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
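/// A minimal flow sketch for an init-capture such as "[x = init]"
/// (illustrative; variable names are hypothetical):
/// \code
///   QualType DeducedTy = performLambdaInitCaptureInitialization(
///       Loc, /*ByRef=*/false, Id, Init);
///   VarDecl *Dummy = createLambdaInitCaptureVarDecl(Loc, DeducedTy, Id, Init);
///   FieldDecl *Capture = buildInitCaptureField(LSI, Dummy);
/// \endcode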
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in a trivial body, and IR generation actually generates the real code
/// for the function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
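/// A minimal end-of-TU sketch (illustrative): vtable uses are queued by
/// MarkVTableUsed and drained here; defining one vtable may queue more.
/// \code
///   MarkVTableUsed(Loc, ClassDecl);   // during normal semantic analysis
///   // ... at end of translation unit ...
///   while (DefineUsedVTables())
///     /* repeat until no new uses appear */;
/// \endcode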
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXMemberDefaultArgs(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
unsigned NumBases);
bool IsDerivedFrom(QualType Derived, QualType Base);
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
AbstractDiagSelID SelID = AbstractNone);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
bool NextIsLess = false); // HLSL Change - additional special case flag
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool NextIsLess = false); // HLSL Change - additional special case flag
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
Decl **Params, unsigned NumParams,
SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
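/// For example (illustrative): \c N is deduced from an array bound:
/// \code
/// template<typename T, unsigned N> void f(T (&arr)[N]);
/// int a[4]; // calling f(a) deduces T = int and N = 4
/// \endcode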
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
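/// For example, an illustrative sketch: checking the arguments in
/// \c S<int> against
/// \code
/// template<typename T, typename U = T*> struct S;
/// \endcode
/// produces the converted list {int, int*}, with the defaulted
/// argument substituted and canonicalized.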
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
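/// For example, an illustrative sketch: each argument followed by an
/// ellipsis below is parsed as a pack expansion:
/// \code
/// template<typename ...Ts> struct tuple;
/// template<typename ...Ts> tuple<Ts...> make(Ts ...args);
/// \endcode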
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
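/// For example, an illustrative sketch: when expanding the pattern
/// \c h(ts) in
/// \code
/// template<typename ...Ts> void f(Ts ...ts) { g(h(ts)...); }
/// \endcode
/// with \c Ts = {int, float}, \c ShouldExpand is set to \c true and
/// \c NumExpansions to 2, so the pattern is transformed once per element.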
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
QualType OriginalParamType;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
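/// For example (illustrative): with \c TypeWithAuto = \c auto* and
/// \c Replacement = \c int, the result is \c int*:
/// \code
/// auto *p = new int; // 'auto *' becomes 'int *' after substitution
/// \endcode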
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArguments provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
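/// Typical usage, an illustrative sketch (\c SemaRef and
/// \c NumExpansions are assumed to be in scope):
/// \code
/// for (unsigned I = 0; I != *NumExpansions; ++I) {
///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
///   // substitute the pattern with each pack fixed to element I
/// }
/// \endcode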
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
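///
/// Typical usage, an illustrative sketch:
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return true; // depth limit exceeded; an error was already emitted
/// \endcode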
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
private:
Sema &SemaRef;
bool Invalid;
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
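///
/// Typical usage, an illustrative sketch:
/// \code
/// SFINAETrap Trap(*this);
/// // ... perform the provisional substitution or check ...
/// if (Trap.hasErrorOccurred())
///   return TDK_SubstitutionFailure; // errors were trapped, not emitted
/// \endcode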
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
}
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
class SavePendingInstantiationsAndVTableUsesRAII {
public:
SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
~SavePendingInstantiationsAndVTableUsesRAII() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
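///
/// For example, an illustrative sketch: \c Local::g must be
/// instantiated in the same scope as \c f so it can still reference
/// \c f's local context:
/// \code
/// template<typename T> void f() {
///   struct Local { void g() { T t; (void)t; } };
///   Local().g();
/// }
/// \endcode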
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
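// For example, under the usual Objective-C container mapping:
//
//   @interface Foo : NSObject @end   // OCK_Interface
//   @protocol P @end                 // OCK_Protocol
//   @interface Foo (Extra) @end      // OCK_Category
//   @interface Foo () @end           // OCK_ClassExtension
//   @implementation Foo @end         // OCK_Implementation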
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
const IdentifierLocPair *ProtocolId,
unsigned NumProtocols,
SmallVectorImpl<Decl *> &Protocols);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with the type of \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCContainerDecl *CD,
ObjCPropertyDecl *redeclaredProperty = nullptr,
ObjCContainerDecl *lexicalDC = nullptr);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
bool *OverridingProperty,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// Optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
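// Roughly, for message sends written with a leading identifier:
//
//   [super init];       // ObjCSuperMessage
//   [contact name];     // ObjCInstanceMessage ('contact' names a variable)
//   [NSString string];  // ObjCClassMessage ('NSString' names a type)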
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
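// A bridged cast moves a pointer across the ARC/CoreFoundation boundary;
// as a sketch of what Kind distinguishes:
//
//   CFStringRef cf = CFSTR("x");
//   NSString *a = (__bridge NSString *)cf;            // no ownership transfer
//   NSString *b = (__bridge_transfer NSString *)cf;   // ARC assumes ownership
//   CFStringRef c = (__bridge_retained CFStringRef)a; // caller now owns a +1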
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
enum PragmaPackKind {
PPK_Default, // #pragma pack([n])
PPK_Show, // #pragma pack(show), only supported by MSVC.
PPK_Push, // #pragma pack(push, [identifier], [n])
PPK_Pop // #pragma pack(pop, [identifier], [n])
};
enum PragmaMSStructKind {
PMSST_OFF, // #pragma ms_struct off
PMSST_ON // #pragma ms_struct on
};
enum PragmaMSCommentKind {
PCK_Unknown,
PCK_Linker, // #pragma comment(linker, ...)
PCK_Lib, // #pragma comment(lib, ...)
PCK_Compiler, // #pragma comment(compiler, ...)
PCK_ExeStr, // #pragma comment(exestr, ...)
PCK_User // #pragma comment(user, ...)
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
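// For example, with the usual forms of the pragma:
//
//   #pragma pack(push, r1, 8)  // Kind == PPK_Push, Name == r1, Alignment == 8
//   #pragma pack(pop, r1)      // Kind == PPK_Pop,  Name == r1
//   #pragma pack()             // Kind == PPK_Default, no Name or Alignment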
/// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility...
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
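// Sketch of the intended pairing:
//
//   #pragma clang optimize off
//   void f();   // in scope of the pragma; getOptimizeOffPragmaLocation()
//               // yields the location of the 'off' pragma above
//   #pragma clang optimize on
//   void g();   // state is "on" again; the stored location is invalid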
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
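// For example, CUDA source such as (a sketch):
//
//   __global__ void __launch_bounds__(256, 2) kernel(int *p);
//
// arrives here with MaxThreads evaluating to 256 and MinBlocks to 2.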
// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
OpenMPClauseKind CKind);
public:
/// \brief Check if the specified variable is used in one of the private
/// clauses in OpenMP constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);
/// \brief Check if the specified variable is used in a 'private' clause in
/// OpenMP constructs.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
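// For example (a sketch, with hypothetical n and body):
//
//   #pragma omp for
//   for (int i = 0; i < n; ++i) body(i);
//
// 'i' is the loop control variable initialized in Init, and is marked
// private by default for the loop region.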
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
unsigned Argument, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ArgumentLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation KindLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
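// For example:
//
//   #pragma omp for schedule(dynamic, 4)
//
// reaches this callback with Kind naming the 'dynamic' schedule and
// ChunkSize holding the expression 4.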
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *
ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId);
/// \brief Called on well-formed 'linear' clause.
OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList,
Expr *Step,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used by DefaultVariadicArgumentPromotion to emit the right warning.
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
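// Roughly, for arguments passed through an ellipsis:
//
//   printf("%d", 42);            // VAK_Valid
//   printf("%s", std::string()); // a non-trivial class type by value is
//                                // undefined behavior: VAK_Undefined (or the
//                                // MSVC flavor in MS-compatibility mode)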
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// that point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
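// A few concrete classifications (C, where these are accepted as extensions):
//
//   int *p = 42;             // IntToPointer
//   int i  = p;              // PointerToInt
//   unsigned *u = p;         // IncompatiblePointerSign
//   char **cpp = 0;
//   const char **ccp = cpp;  // IncompatibleNestedPointerQualifiers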
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
bool Diagnose = true,
bool DiagnoseCFAudited = false);
/// \brief If the lhs type is a transparent union, check whether we
/// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = nullptr);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool *NonStandardCompositeType = nullptr) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
NonStandardCompositeType);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible_With_Added_Qualification - The two types are
/// reference-compatible with added qualification, meaning that
/// they are reference-compatible and the qualifiers on T1 (cv1)
/// are greater than the qualifiers on T2 (cv2).
Ref_Compatible_With_Added_Qualification,
/// Ref_Compatible - The two types are reference-compatible and
/// have equivalent qualifiers (cv1 == cv2).
Ref_Compatible
};
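// For example, when binding references (a sketch):
//
//   int i = 0;
//   int &r1 = i;        // T1 == T2, cv1 == cv2: Ref_Compatible
//   const int &r2 = i;  // cv1 adds 'const': Ref_Compatible_With_Added_Qualification
//
// Comparing 'int' (T1) against 'const int' (T2) yields only Ref_Related,
// since cv1 lacks a qualifier present in cv2.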
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
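// For example, with GCC-style and extended vectors (a sketch; the integer
// size assumes a typical 64-bit target):
//
//   typedef short v4s __attribute__((vector_size(8)));
//   typedef float f4  __attribute__((ext_vector_type(4)));
//   v4s v = {1, 2, 3, 4};
//   long long bits = (long long)v; // vector <-> same-size integer: accepted
//   f4 splat = (f4)1.0f;           // element type -> ext vector: splats 1.0f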
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
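// The classic pattern these checks catch (under ARC):
//
//   self.handler = ^{ [self doWork]; };  // 'self' retains the block via the
//                                        // property, and the block captures
//                                        // (retains) 'self': a retain cycle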
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted condition, or an invalid result if there were errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns an invalid result if conversion to bool fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
// HLSL Change Starts - checking array subscript access to vector or matrix member
void CheckHLSLArrayAccess(const Expr *expr);
// HLSL Change ends
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
ArrayRef<const Expr *> Args, bool IsMemberFunction,
SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinCpuSupports(CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
FormatStringType Type, bool inFunctionCall,
VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs);
bool FormatStringHasSArg(const StringLiteral *FExpr);
bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl,
IdentifierInfo *FnInfo);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter);
bool DiagnoseHLSLLookup(const LookupResult &R);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
AvailabilityResult getCurContextAvailability() const;
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// \brief To be used for checking whether the number of arguments being
/// passed to a function exceeds its number of expected parameters.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
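/// Example (illustrative; `SemaRef` and the surrounding code are assumed, not
/// part of this header): a typical scoped use of the RAII helper above:
/// \code
///   {
///     EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
///     // ... build expressions that must not be odr-used ...
///   } // the evaluation context is popped here automatically
/// \endcode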
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
#endif
|
GB_unop__identity_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_uint64)
// op(A') function: GB (_unop_tran__identity_int8_uint64)
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = (int8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
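// (0 here because C is int8_t while A is uint64_t: the typecast is required,
// so the GB_memcpy fast path below cannot be taken.)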
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_uint64)
(
int8_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int8_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
LshFunctions.h | #ifndef _LSH_FUNCTIONS
#define _LSH_FUNCTIONS
#include <climits>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>
uint64_t combine(uint64_t item1, uint64_t item2) {
return item1 * 0xC4DD05BF + item2 * 0x6C8702C9;
}
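// The two odd 64-bit constants give a cheap multiplicative mix. Note the
// combination is order-sensitive (combine(a, b) != combine(b, a)), which
// keeps the probe sequences of different bins distinct during densification.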
void single_densified_minhash(uint64_t *result, uint64_t *point,
uint64_t point_len, uint64_t num_tables,
uint64_t hashes_per_table, uint8_t hash_range_pow,
uint32_t random_seed) {
uint64_t num_hashes_to_generate = num_tables * hashes_per_table;
std::vector<uint64_t> prelim_result(num_hashes_to_generate);
// Pure integer bin width; the clamp below absorbs the remainder bin, and
// avoiding the float round-trip keeps full 64-bit precision.
uint64_t binsize = UINT64_MAX / num_hashes_to_generate;
for (uint64_t i = 0; i < num_hashes_to_generate; i++) {
prelim_result[i] = UINT64_MAX;
}
for (uint64_t i = 0; i < point_len; i++) {
uint64_t val = point[i];
val *= random_seed;
val ^= val >> 13;
val *= 0x192AF017AAFFF017;
val *= val;
uint64_t hash = val;
uint64_t binid = std::min(val / binsize, num_hashes_to_generate - 1);
if (prelim_result[binid] > hash) {
prelim_result[binid] = hash;
}
}
// Densify
for (size_t i = 0; i < num_hashes_to_generate; i++) {
uint64_t next = prelim_result[i];
if (next != UINT64_MAX) {
continue;
}
uint64_t count = 0;
while (next == UINT64_MAX) {
count++;
uint64_t index = combine(i, count) % num_hashes_to_generate;
next = prelim_result[index]; // Kills GPU.
if (count > 100) { // Densification failure.
next = 0;
break;
}
}
prelim_result[i] = next;
}
// Combine each K
for (uint64_t table = 0; table < num_tables; table++) {
result[table] = prelim_result[hashes_per_table * table];
for (uint64_t hash = 1; hash < hashes_per_table; hash++) {
// Fold the table's hash-th minhash into the running combined value.
result[table] =
combine(prelim_result[hashes_per_table * table + hash], result[table]);
}
result[table] >>= (64 - hash_range_pow);
}
}
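// Usage sketch (illustrative only; the values and driver code below are
// assumptions, not part of this header):
//
//   uint64_t point[3] = {7, 42, 1001};   // nonzero indices of a sparse vector
//   uint64_t hashes[2];                  // one bucket id per table
//   single_densified_minhash(hashes, point, /*point_len=*/3,
//                            /*num_tables=*/2, /*hashes_per_table=*/4,
//                            /*hash_range_pow=*/8, /*random_seed=*/12345);
//   // hashes[t] now lies in [0, 2^8) and can index the buckets of table t.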
std::vector<uint64_t>
parallel_densified_minhash(uint64_t *points, uint64_t num_points,
uint64_t point_dimension, uint64_t num_tables,
uint64_t hashes_per_table, uint8_t hash_range_pow,
uint32_t random_seed) {
std::vector<uint64_t> result(num_tables * num_points);
#pragma omp parallel for
for (uint64_t point_id = 0; point_id < num_points; point_id += 1) {
single_densified_minhash((&result[0]) + point_id * num_tables,
points + point_id * point_dimension,
point_dimension, num_tables, hashes_per_table,
hash_range_pow, random_seed);
}
return result;
}
std::vector<uint64_t>
parallel_densified_minhash(std::vector<std::vector<uint64_t>> points,
uint64_t num_tables, uint64_t hashes_per_table,
uint8_t hash_range_pow, uint32_t random_seed) {
std::vector<uint64_t> result(num_tables * points.size());
#pragma omp parallel for
for (uint64_t point_id = 0; point_id < points.size(); point_id += 1) {
single_densified_minhash((&result[0]) + point_id * num_tables,
(&points[point_id][0]), points[point_id].size(),
num_tables, hashes_per_table, hash_range_pow,
random_seed);
}
return result;
}
std::vector<uint64_t> parallel_srp(float *dense_data, uint64_t num_points,
uint64_t data_dimension, int8_t *random_bits,
uint64_t num_tables,
uint64_t hashes_per_table) {
std::vector<uint64_t> result(num_tables * num_points);
#pragma omp parallel for
for (uint64_t data_id = 0; data_id < num_points; data_id++) {
for (uint64_t rep = 0; rep < num_tables; rep++) {
uint64_t hash = 0;
for (uint64_t bit = 0; bit < hashes_per_table; bit++) {
double sum = 0;
for (uint64_t j = 0; j < data_dimension; j++) {
double val = dense_data[data_dimension * data_id + j];
if (random_bits[rep * hashes_per_table * data_dimension +
bit * data_dimension + j] > 0) {
sum += val;
} else {
sum -= val;
}
}
hash += (uint64_t)(sum > 0) << bit;
}
result[data_id * num_tables + rep] = hash;
}
}
return result;
}
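// Each of the hashes_per_table bits above is the sign of the dot product
// between the data point and a +/-1 hyperplane drawn from random_bits, i.e.
// classic signed random projections (SimHash-style LSH for cosine similarity).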
#endif |
getStartLists.c | #include "defs.h"
double getStartLists(graph* G, edge** maxIntWtListPtr,
INT_T* maxIntWtListSizePtr) {
LONG_T *local_max, maxWeight;
edge *maxIntWtList;
LONG_T maxIntWtListSize;
LONG_T *p_start, *p_end;
double elapsed_time;
elapsed_time = get_seconds();
#ifdef _OPENMP
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
{
#endif
LONG_T i, j, n;
edge* pList;
LONG_T pCount, tmpListSize;
int tid, nthreads;
#ifdef DIAGNOSTIC
double elapsed_time_part;
#endif
#ifdef _OPENMP
tid = omp_get_thread_num();
nthreads = omp_get_num_threads();
#else
tid = 0;
nthreads = 1;
#endif
n = G->n;
/* Determine the maximum edge weight */
if (tid == 0) {
local_max = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
}
/* Allocate memory for partial edge list on each thread */
tmpListSize = 1000;
pList = (edge *) malloc(tmpListSize*sizeof(edge));
pCount = 0;
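/* Note: tmpListSize is a fixed guess; the code assumes no thread ever sees
more than 1000 maximum-weight edges, since pList is never grown. */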
#ifdef _OPENMP
#pragma omp barrier
#endif
local_max[tid] = -1;
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds();
}
#endif
#ifdef _OPENMP
#pragma omp for
#endif
for (i=0; i<n; i++) {
for (j=G->numEdges[i]; j<G->numEdges[i+1]; j++) {
if (G->weight[j] > local_max[tid]) {
local_max[tid] = G->weight[j];
pCount = 0;
pList[pCount].startVertex = i;
pList[pCount].endVertex = G->endV[j];
pList[pCount].w = local_max[tid];
pList[pCount].e = j;
pCount++;
} else if (G->weight[j] == local_max[tid]) {
pList[pCount].startVertex = i;
pList[pCount].endVertex = G->endV[j];
pList[pCount].w = local_max[tid];
pList[pCount].e = j;
pCount++;
}
}
}
#ifdef _OPENMP
#pragma omp barrier
#endif
if (tid == 0) {
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() - elapsed_time_part;
fprintf(stderr, "Max. weight computation time: %lf seconds\n", elapsed_time_part);
}
#endif
maxWeight = local_max[0];
for (i=1; i<nthreads; i++) {
if (local_max[i] > maxWeight)
maxWeight = local_max[i];
}
// free(local_max);
}
#ifdef _OPENMP
#pragma omp barrier
#endif
if (local_max[tid] != maxWeight) {
pCount = 0;
}
/* Merge all the partial edge lists */
if (tid == 0) {
p_start = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
p_end = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
}
#ifdef _OPENMP
#pragma omp barrier
#endif
p_end[tid] = pCount;
p_start[tid] = 0;
#ifdef _OPENMP
#pragma omp barrier
#endif
if (tid == 0) {
for (i=1; i<nthreads; i++) {
p_end[i] = p_end[i-1] + p_end[i];
p_start[i] = p_end[i-1];
}
maxIntWtListSize = p_end[nthreads-1];
free(*maxIntWtListPtr);
maxIntWtList = (edge *) malloc((maxIntWtListSize)*sizeof(edge));
}
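/* p_end[] now holds an inclusive prefix sum of the per-thread counts, so each
thread copies its pCount entries into the disjoint slice
[p_start[tid], p_end[tid]) of the merged list below. */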
#ifdef _OPENMP
#pragma omp barrier
#endif
for (j=p_start[tid]; j<p_end[tid]; j++) {
(maxIntWtList[j]).startVertex = pList[j-p_start[tid]].startVertex;
(maxIntWtList[j]).endVertex = pList[j-p_start[tid]].endVertex;
(maxIntWtList[j]).e = pList[j-p_start[tid]].e;
(maxIntWtList[j]).w = pList[j-p_start[tid]].w;
}
#ifdef _OPENMP
#pragma omp barrier
#endif
free(pList);
if (tid == 0) {
free(local_max);
free(p_start);
free(p_end);
*maxIntWtListPtr = maxIntWtList;
*maxIntWtListSizePtr = maxIntWtListSize;
}
#ifdef _OPENMP
}
#endif
/* Verification */
#if 0
maxIntWtList = *maxIntWtListPtr;
for (int i=0; i<*maxIntWtListSizePtr; i++) {
fprintf(stderr, "[%ld %ld %ld %ld] ", maxIntWtList[i].startVertex,
maxIntWtList[i].endVertex, maxIntWtList[i].e, maxIntWtList[i].w);
}
#endif
elapsed_time = get_seconds() - elapsed_time;
return elapsed_time;
}
|
convolution_3x3_int8.h | // BUG1989 is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char* kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
*outptr0 += sum0;
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
}
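// Note: this direct path assumes top_blob was pre-sized for a valid 3x3
// stride-1 convolution (outw == w - 2, outh == h - 2); the `r0 += 2` row
// advance above relies on w - outw == 2.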
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
kernel_tm.create(4 * 4, inch, outch, (size_t)2u);
// G
const short ktm[4][3] = {
{2, 0, 0},
{1, 1, 1},
{1, -1, 1},
{0, 0, 2}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
short tmp[4][3];
for (int i = 0; i < 4; i++)
{
tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 4; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 4; i++)
{
kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
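// Note on scaling: this integer ktm is the usual F(2,3) kernel transform G
// ({1,0,0; 1/2,1/2,1/2; 1/2,-1/2,1/2; 0,0,1}) scaled by 2. It is applied on
// both sides (U = G * k * G^T), so transformed kernels carry a factor of 4,
// which the output transform removes with the final `>> 2`.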
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4 * 4, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 2;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[4], d1[4], d2[4], d3[4];
short w0[4], w1[4], w2[4], w3[4];
short t0[4], t1[4], t2[4], t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
}
// U = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n] = d0[n];
out_tm0[n + 4] = d1[n];
out_tm0[n + 8] = d2[n];
out_tm0[n + 12] = d3[n];
}
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p + 1);
Mat out2_tm = top_blob_tm.channel(p + 2);
Mat out3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p + 1);
const Mat kernel2_tm = kernel_tm.channel(p + 2);
const Mat kernel3_tm = kernel_tm.channel(p + 3);
for (int i = 0; i < tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int* output1_tm = out1_tm.row<int>(i);
int* output2_tm = out2_tm.row<int>(i);
int* output3_tm = out3_tm.row<int>(i);
int sum0[16] = {0};
int sum1[16] = {0};
int sum2[16] = {0};
int sum3[16] = {0};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
k0 += 16;
sum0[n] += (int)r1[n] * k0[n];
k0 += 16;
sum0[n] += (int)r2[n] * k0[n];
k0 += 16;
sum0[n] += (int)r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += (int)r0[n] * k1[n];
k1 += 16;
sum1[n] += (int)r1[n] * k1[n];
k1 += 16;
sum1[n] += (int)r2[n] * k1[n];
k1 += 16;
sum1[n] += (int)r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += (int)r0[n] * k2[n];
k2 += 16;
sum2[n] += (int)r1[n] * k2[n];
k2 += 16;
sum2[n] += (int)r2[n] * k2[n];
k2 += 16;
sum2[n] += (int)r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += (int)r0[n] * k3[n];
k3 += 16;
sum3[n] += (int)r1[n] * k3[n];
k3 += 16;
sum3[n] += (int)r2[n] * k3[n];
k3 += 16;
sum3[n] += (int)r3[n] * k3[n];
k3 -= 16 * 3;
}
}
for (; q < inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum1[n] += (int)r0[n] * k1[n];
sum2[n] += (int)r0[n] * k2[n];
sum3[n] += (int)r0[n] * k3[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i = 0; i < tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[16] = {0};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel0_tm.row<short>(q + 1);
const short* k2 = kernel0_tm.row<short>(q + 2);
const short* k3 = kernel0_tm.row<short>(q + 3);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum0[n] += (int)r1[n] * k1[n];
sum0[n] += (int)r2[n] * k2[n];
sum0[n] += (int)r3[n] * k3[n];
}
}
for (; q < inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j = 0; j < nColBlocks; j++)
{
int* outRow0 = out.row<int>(j * 2);
int* outRow1 = out.row<int>(j * 2 + 1);
for (int i = 0; i < nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j * nRowBlocks + i);
int s0[4], s1[4], s2[4], s3[4];
int w0[4], w1[4];
int d0[2], d1[2], d2[2], d3[2];
int o0[2], o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 4];
s2[n] = out_tile[n + 8];
s3[n] = out_tile[n + 12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d1[0] = w0[1];
d1[1] = w1[1];
d2[0] = w0[2];
d2[1] = w1[2];
d3[0] = w0[3];
d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n];
o1[n] = d1[n] - d2[n] + d3[n];
}
// save to top blob tm; shift right by 2 because the kernel transform used
// G' = 2*G (applied on both sides, so results carry a factor of 4)
outRow0[0] = o0[0] >> 2;
outRow0[1] = o0[1] >> 2;
outRow1[0] = o1[0] >> 2;
outRow1[1] = o1[1] >> 2;
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
kernel_tm.create(6 * 6, inch, outch, (size_t)2u);
// G
// const float ktm[6][3] = {
// { 1.0f/4, 0.0f, 0.0f},
// { -1.0f/6, -1.0f/6, -1.0f/6},
// { -1.0f/6, 1.0f/6, -1.0f/6},
// { 1.0f/24, 1.0f/12, 1.0f/6},
// { 1.0f/24, -1.0f/12, 1.0f/6},
// { 0.0f, 0.0f, 1.0f}
// };
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 24}
};
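// Note on scaling: this integer ktm equals the float G above scaled by 24
// (1/4 -> 6, -1/6 -> -4, 1/24 -> 1, 1 -> 24). Applied on both sides of the
// kernel it contributes a factor of 24 * 24 = 576, which the output transform
// removes with the final division by 576.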
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
static void conv3x3s1_winograd43_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(6 * 6, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
for (int n = 0; n < 6; n++)
{
out_tm0[n] = d0[n];
out_tm0[n + 6] = d1[n];
out_tm0[n + 12] = d2[n];
out_tm0[n + 18] = d3[n];
out_tm0[n + 24] = d4[n];
out_tm0[n + 30] = d5[n];
}
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
out_tm0 += 36;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i = 0; i < tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[36] = {0};
for (int q = 0; q < inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n = 0; n < 36; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n = 0; n < 36; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j = 0; j < nColBlocks; j++)
{
int* outRow0 = out.row<int>(j * 4);
int* outRow1 = out.row<int>(j * 4 + 1);
int* outRow2 = out.row<int>(j * 4 + 2);
int* outRow3 = out.row<int>(j * 4 + 3);
for (int i = 0; i < nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j * nRowBlocks + i);
int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
int w0[6], w1[6], w2[6], w3[6];
int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
int o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] / 576;
outRow1[n] = o1[n] / 576;
outRow2[n] = o2[n] / 576;
outRow3[n] = o3[n] / 576;
}
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
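// After consuming outw stride-2 positions a row pointer has advanced by
// 2 * outw, so tailstep = (w - 2 * outw) + w skips the row remainder plus
// one full input row (stride 2 vertically).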
const signed char* kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
*outptr0 += sum0;
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
}
|
DeclOpenMP.h | //===- DeclOpenMP.h - Classes for representing OpenMP directives -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines OpenMP nodes for declarative directives.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_DECLOPENMP_H
#define LLVM_CLANG_AST_DECLOPENMP_H
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"
namespace clang {
/// This represents '#pragma omp threadprivate ...' directive.
/// For example, in the following, both 'a' and 'A::b' are threadprivate:
///
/// \code
/// int a;
/// #pragma omp threadprivate(a)
/// struct A {
/// static int b;
/// #pragma omp threadprivate(b)
/// };
/// \endcode
///
class OMPThreadPrivateDecl final
: public Decl,
private llvm::TrailingObjects<OMPThreadPrivateDecl, Expr *> {
friend class ASTDeclReader;
friend TrailingObjects;
unsigned NumVars;
virtual void anchor();
OMPThreadPrivateDecl(Kind DK, DeclContext *DC, SourceLocation L) :
Decl(DK, DC, L), NumVars(0) { }
ArrayRef<const Expr *> getVars() const {
return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars);
}
MutableArrayRef<Expr *> getVars() {
return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars);
}
void setVars(ArrayRef<Expr *> VL);
public:
static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
ArrayRef<Expr *> VL);
static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C,
unsigned ID, unsigned N);
typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
typedef llvm::iterator_range<varlist_iterator> varlist_range;
typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;
unsigned varlist_size() const { return NumVars; }
bool varlist_empty() const { return NumVars == 0; }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVars().begin(); }
varlist_iterator varlist_end() { return getVars().end(); }
varlist_const_iterator varlist_begin() const { return getVars().begin(); }
varlist_const_iterator varlist_end() const { return getVars().end(); }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};
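/// Example (illustrative; `TPD` and `handle` are assumed names): iterating
/// the variable references of a threadprivate directive:
/// \code
///   for (Expr *Ref : TPD->varlists())
///     handle(Ref);
/// \endcode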
/// This represents '#pragma omp declare reduction ...' directive.
/// For example, in the following, a reduction 'foo' is declared for the types
/// 'int' and 'float':
///
/// \code
/// #pragma omp declare reduction (foo : int,float : omp_out += omp_in) \
/// initializer (omp_priv = 0)
/// \endcode
///
/// Here 'omp_out += omp_in' is a combiner and 'omp_priv = 0' is an initializer.
class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext {
// This class stores some data in DeclContext::OMPDeclareReductionDeclBits
// to save some space. Use the provided accessors to access it.
public:
enum InitKind {
CallInit, // Initialized by function call.
DirectInit, // omp_priv(<expr>)
CopyInit // omp_priv = <expr>
};
private:
friend class ASTDeclReader;
/// Combiner for declare reduction construct.
Expr *Combiner = nullptr;
/// Initializer for declare reduction construct.
Expr *Initializer = nullptr;
/// In parameter of the combiner.
Expr *In = nullptr;
/// Out parameter of the combiner.
Expr *Out = nullptr;
/// Priv parameter of the initializer.
Expr *Priv = nullptr;
/// Orig parameter of the initializer.
Expr *Orig = nullptr;
/// Reference to the previous declare reduction construct in the same
/// scope with the same name. Required for proper template instantiation when
/// the declare reduction construct is declared inside a compound statement.
LazyDeclPtr PrevDeclInScope;
virtual void anchor();
OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, QualType Ty,
OMPDeclareReductionDecl *PrevDeclInScope);
void setPrevDeclInScope(OMPDeclareReductionDecl *Prev) {
PrevDeclInScope = Prev;
}
public:
/// Create declare reduction node.
static OMPDeclareReductionDecl *
Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name,
QualType T, OMPDeclareReductionDecl *PrevDeclInScope);
/// Create deserialized declare reduction node.
static OMPDeclareReductionDecl *CreateDeserialized(ASTContext &C,
unsigned ID);
/// Get combiner expression of the declare reduction construct.
Expr *getCombiner() { return Combiner; }
const Expr *getCombiner() const { return Combiner; }
/// Get In variable of the combiner.
Expr *getCombinerIn() { return In; }
const Expr *getCombinerIn() const { return In; }
/// Get Out variable of the combiner.
Expr *getCombinerOut() { return Out; }
const Expr *getCombinerOut() const { return Out; }
/// Set combiner expression for the declare reduction construct.
void setCombiner(Expr *E) { Combiner = E; }
/// Set combiner In and Out vars.
void setCombinerData(Expr *InE, Expr *OutE) {
In = InE;
Out = OutE;
}
/// Get initializer expression (if specified) of the declare reduction
/// construct.
Expr *getInitializer() { return Initializer; }
const Expr *getInitializer() const { return Initializer; }
/// Get initializer kind.
InitKind getInitializerKind() const {
return static_cast<InitKind>(OMPDeclareReductionDeclBits.InitializerKind);
}
/// Get Orig variable of the initializer.
Expr *getInitOrig() { return Orig; }
const Expr *getInitOrig() const { return Orig; }
/// Get Priv variable of the initializer.
Expr *getInitPriv() { return Priv; }
const Expr *getInitPriv() const { return Priv; }
/// Set initializer expression for the declare reduction construct.
void setInitializer(Expr *E, InitKind IK) {
Initializer = E;
OMPDeclareReductionDeclBits.InitializerKind = IK;
}
/// Set initializer Orig and Priv vars.
void setInitializerData(Expr *OrigE, Expr *PrivE) {
Orig = OrigE;
Priv = PrivE;
}
/// Get reference to previous declare reduction construct in the same
/// scope with the same name.
OMPDeclareReductionDecl *getPrevDeclInScope();
const OMPDeclareReductionDecl *getPrevDeclInScope() const;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPDeclareReduction; }
static DeclContext *castToDeclContext(const OMPDeclareReductionDecl *D) {
return static_cast<DeclContext *>(const_cast<OMPDeclareReductionDecl *>(D));
}
static OMPDeclareReductionDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<OMPDeclareReductionDecl *>(
const_cast<DeclContext *>(DC));
}
};
/// This represents '#pragma omp declare mapper ...' directive. Map clauses
/// may be used with this directive. The following example declares a
/// user-defined mapper for the type 'struct vec', instructing that the fields
/// 'len' and 'data' be mapped when mapping instances of 'struct vec'.
///
/// \code
/// #pragma omp declare mapper(mid: struct vec v) map(v.len, v.data[0:N])
/// \endcode
class OMPDeclareMapperDecl final : public ValueDecl, public DeclContext {
friend class ASTDeclReader;
/// Clauses associated with this mapper declaration
MutableArrayRef<OMPClause *> Clauses;
/// Mapper variable, which is 'v' in the example above
Expr *MapperVarRef = nullptr;
/// Name of the mapper variable
DeclarationName VarName;
LazyDeclPtr PrevDeclInScope;
virtual void anchor();
OMPDeclareMapperDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, QualType Ty,
DeclarationName VarName,
OMPDeclareMapperDecl *PrevDeclInScope)
: ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), VarName(VarName),
PrevDeclInScope(PrevDeclInScope) {}
void setPrevDeclInScope(OMPDeclareMapperDecl *Prev) {
PrevDeclInScope = Prev;
}
/// Sets an array of clauses to this mapper declaration
void setClauses(ArrayRef<OMPClause *> CL);
public:
/// Creates declare mapper node.
static OMPDeclareMapperDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName Name,
QualType T, DeclarationName VarName,
OMPDeclareMapperDecl *PrevDeclInScope);
/// Creates deserialized declare mapper node.
static OMPDeclareMapperDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned N);
  /// Creates an array of clauses for this mapper declaration and initializes
  /// them.
void CreateClauses(ASTContext &C, ArrayRef<OMPClause *> CL);
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range =
llvm::iterator_range<clauselist_const_iterator>;
unsigned clauselist_size() const { return Clauses.size(); }
bool clauselist_empty() const { return Clauses.empty(); }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return Clauses.begin(); }
clauselist_iterator clauselist_end() { return Clauses.end(); }
clauselist_const_iterator clauselist_begin() const { return Clauses.begin(); }
clauselist_const_iterator clauselist_end() const { return Clauses.end(); }
/// Get the variable declared in the mapper
Expr *getMapperVarRef() { return MapperVarRef; }
const Expr *getMapperVarRef() const { return MapperVarRef; }
/// Set the variable declared in the mapper
void setMapperVarRef(Expr *MapperVarRefE) { MapperVarRef = MapperVarRefE; }
/// Get the name of the variable declared in the mapper
DeclarationName getVarName() { return VarName; }
/// Get reference to previous declare mapper construct in the same
/// scope with the same name.
OMPDeclareMapperDecl *getPrevDeclInScope();
const OMPDeclareMapperDecl *getPrevDeclInScope() const;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPDeclareMapper; }
static DeclContext *castToDeclContext(const OMPDeclareMapperDecl *D) {
return static_cast<DeclContext *>(const_cast<OMPDeclareMapperDecl *>(D));
}
static OMPDeclareMapperDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<OMPDeclareMapperDecl *>(const_cast<DeclContext *>(DC));
}
};
/// Pseudo declaration for capturing expressions. Also used for capturing
/// non-static data members in non-static member functions.
///
/// Clang supports capturing of variables only, but OpenMP 4.5 allows
/// privatizing non-static members of the current class in non-static member
/// functions. This pseudo-declaration allows such captures to be handled
/// properly by wrapping the captured expression in a variable-like
/// declaration.
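///
/// Illustrative sketch (not part of the original header):
///
/// \code
/// struct S {
///   int a;
///   void foo() {
/// #pragma omp parallel for firstprivate(a)
///     for (int i = 0; i < a; ++i)
///       ;
///   }
/// };
/// \endcode
///
/// Here 'a' is a data member rather than a variable, so its use is wrapped in
/// an OMPCapturedExprDecl and captured like an ordinary variable.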
class OMPCapturedExprDecl final : public VarDecl {
friend class ASTDeclReader;
void anchor() override;
OMPCapturedExprDecl(ASTContext &C, DeclContext *DC, IdentifierInfo *Id,
QualType Type, TypeSourceInfo *TInfo,
SourceLocation StartLoc)
: VarDecl(OMPCapturedExpr, C, DC, StartLoc, StartLoc, Id, Type, TInfo,
SC_None) {
setImplicit();
}
public:
static OMPCapturedExprDecl *Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id, QualType T,
SourceLocation StartLoc);
static OMPCapturedExprDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceRange getSourceRange() const override LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPCapturedExpr; }
};
/// This represents '#pragma omp requires...' directive.
/// For example
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
///
class OMPRequiresDecl final
: public Decl,
private llvm::TrailingObjects<OMPRequiresDecl, OMPClause *> {
friend class ASTDeclReader;
friend TrailingObjects;
// Number of clauses associated with this requires declaration
unsigned NumClauses = 0;
virtual void anchor();
OMPRequiresDecl(Kind DK, DeclContext *DC, SourceLocation L)
: Decl(DK, DC, L), NumClauses(0) {}
/// Returns an array of immutable clauses associated with this requires
/// declaration
ArrayRef<const OMPClause *> getClauses() const {
return llvm::makeArrayRef(getTrailingObjects<OMPClause *>(), NumClauses);
}
/// Returns an array of clauses associated with this requires declaration
MutableArrayRef<OMPClause *> getClauses() {
return MutableArrayRef<OMPClause *>(getTrailingObjects<OMPClause *>(),
NumClauses);
}
  /// Sets an array of clauses for this requires declaration
void setClauses(ArrayRef<OMPClause *> CL);
public:
/// Create requires node.
static OMPRequiresDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, ArrayRef<OMPClause *> CL);
/// Create deserialized requires node.
static OMPRequiresDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned N);
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range = llvm::iterator_range<clauselist_const_iterator>;
unsigned clauselist_size() const { return NumClauses; }
bool clauselist_empty() const { return NumClauses == 0; }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return getClauses().begin(); }
clauselist_iterator clauselist_end() { return getClauses().end(); }
clauselist_const_iterator clauselist_begin() const {
return getClauses().begin();
}
clauselist_const_iterator clauselist_end() const {
return getClauses().end();
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPRequires; }
};
} // end namespace clang
#endif
|
parallel_for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for foo
void test_no_clause() {
int i;
#pragma omp parallel for
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp parallel for' must be a for loop}}
#pragma omp parallel for
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel for
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
#pragma omp parallel for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+1 {{defined as firstprivate}}
#pragma omp parallel for collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
mvt.c | /**
 * mvt.c: This file was adapted from the PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
#define BENCHMARK_NAME "MVT"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
#ifdef RUN_POLYBENCH_SIZE
#define SIZE 16384 // 4096
#elif RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define N SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *A, DATA_TYPE *x1, DATA_TYPE *x2, DATA_TYPE *y1,
DATA_TYPE *y2, DATA_TYPE *x1_gpu, DATA_TYPE *x2_gpu) {
int i, j;
for (i = 0; i < N; i++) {
x1[i] = ((DATA_TYPE)i) / N;
x2[i] = ((DATA_TYPE)i + 1) / N;
x1_gpu[i] = x1[i];
x2_gpu[i] = x2[i];
y1[i] = ((DATA_TYPE)i + 3) / N;
y2[i] = ((DATA_TYPE)i + 4) / N;
for (j = 0; j < N; j++) {
A[i * N + j] = ((DATA_TYPE)i * j) / N;
}
}
}
void runMvt(DATA_TYPE *a, DATA_TYPE *x1, DATA_TYPE *x2, DATA_TYPE *y1,
DATA_TYPE *y2) {
int i, j;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x1[i] = x1[i] + a[i * N + j] * y1[j];
}
}
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x2[i] = x2[i] + a[j * N + i] * y2[j];
}
}
}
void runMvt_OMP(DATA_TYPE *a, DATA_TYPE *x1, DATA_TYPE *x2, DATA_TYPE *y1,
DATA_TYPE *y2) {
int i, j;
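  // A and the y vectors are only read on the device, so they are mapped 'to';
  // x1 and x2 are both read and updated, so they are mapped 'tofrom'.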
#pragma omp target teams map(to: a[:N*N], y1[:N], y2[:N]) map(tofrom: x1[:N], x2[:N]) device(DEVICE_ID)
{
#pragma omp distribute parallel for private(j)
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x1[i] = x1[i] + a[i * N + j] * y1[j];
}
}
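    // The second kernel uses the transposed access pattern a[j * N + i],
    // i.e. it computes x2 = x2 + A^T * y2.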
#pragma omp distribute parallel for private(j)
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x2[i] = x2[i] + a[j * N + i] * y2[j];
}
}
}
}
int compareResults(DATA_TYPE *x1, DATA_TYPE *x1_outputFromGpu, DATA_TYPE *x2,
DATA_TYPE *x2_outputFromGpu) {
int i, fail;
fail = 0;
for (i = 0; i < N; i++) {
if (percentDiff(x1[i], x1_outputFromGpu[i]) >
PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
if (percentDiff(x2[i], x2_outputFromGpu[i]) >
PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
PERCENT_DIFF_ERROR_THRESHOLD, fail);
return fail;
}
int main() {
double t_start, t_end;
int fail = 0;
DATA_TYPE *a;
DATA_TYPE *x1;
DATA_TYPE *x2;
DATA_TYPE *x1_outputFromGpu;
DATA_TYPE *x2_outputFromGpu;
DATA_TYPE *y_1;
DATA_TYPE *y_2;
a = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE));
x1 = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
x2 = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
x1_outputFromGpu = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
x2_outputFromGpu = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
y_1 = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
y_2 = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
//fprintf(stdout, "<< Matrix Vector Product and Transpose size: %d>>\n", SIZE);
printBenchmarkInfo(BENCHMARK_NAME, SIZE);
init_array(a, x1, x2, y_1, y_2, x1_outputFromGpu, x2_outputFromGpu);
t_start = rtclock();
runMvt_OMP(a, x1_outputFromGpu, x2_outputFromGpu, y_1, y_2);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
t_start = rtclock();
// run the algorithm on the CPU
runMvt(a, x1, x2, y_1, y_2);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
fail = compareResults(x1, x1_outputFromGpu, x2, x2_outputFromGpu);
#endif
free(a);
free(x1);
free(x2);
free(x1_outputFromGpu);
free(x2_outputFromGpu);
free(y_1);
free(y_2);
return fail;
}
|
ctradd.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/ztradd.c, normal z -> c, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_tradd
*
* Performs an addition of two trapezoidal matrices similarly to the
* pctradd() function from the PBLAS library:
*
* \f[ B = \alpha * op( A ) + \beta * B, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
* \f[ op( X ) = X^H, \f]
*
* alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or
* n-by-m matrix depending on the value of transa and B an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the shape of op( A ) and B matrices:
* - PlasmaUpper: op( A ) and B are upper trapezoidal matrices.
* - PlasmaLower: op( A ) and B are lower trapezoidal matrices.
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^H
*
* @param[in] m
* Number of rows of the matrices op( A ) and B.
* m >= 0.
*
* @param[in] n
* Number of columns of the matrices op( A ) and B.
* n >= 0.
*
* @param[in] alpha
* Scalar factor of A.
*
* @param[in] A
* Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans
* and m otherwise.
*
* @param[in] lda
* Leading dimension of the array A. lda >= max(1,l), where l is m
* when transa = PlasmaNoTrans and n otherwise.
*
* @param[in] beta
* Scalar factor of B.
*
* @param[in,out] B
* Matrix of size ldb-by-n.
* On exit, B = alpha * op( A ) + beta * B
*
* @param[in] ldb
* Leading dimension of the array B.
* ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_ctradd
* @sa plasma_ctradd
* @sa plasma_dtradd
* @sa plasma_stradd
*
******************************************************************************/
int plasma_ctradd(plasma_enum_t uplo, plasma_enum_t transa,
int m, int n,
plasma_complex32_t alpha, plasma_complex32_t *pA, int lda,
plasma_complex32_t beta, plasma_complex32_t *pB, int ldb)
{
// Get PLASMA context
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if ((transa != PlasmaNoTrans) &&
(transa != PlasmaTrans) &&
(transa != PlasmaConjTrans)) {
plasma_error("illegal value of transa");
return -2;
}
if (m < 0) {
plasma_error("illegal value of m");
return -3;
}
if (n < 0) {
plasma_error("illegal value of n");
return -4;
}
if (pA == NULL) {
plasma_error("NULL A");
return -6;
}
int am, an;
if (transa == PlasmaNoTrans) {
am = m;
an = n;
}
else {
am = n;
an = m;
}
int bm = m;
int bn = n;
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return -7;
}
if (pB == NULL) {
plasma_error("NULL B");
return -9;
}
if (ldb < imax(1, bm)) {
plasma_error("illegal value of ldb");
return -10;
}
// quick return
if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_tradd(plasma, PlasmaComplexFloat, m, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t B;
int retval;
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
am, an, 0, 0, am, an, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
bm, bn, 0, 0, bm, bn, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
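    // (The master thread submits the tile operations below as OpenMP tasks;
    // the rest of the team executes them, and the implicit barrier at the end
    // of the parallel region provides the synchronization.)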
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);
// Call tile async function.
plasma_omp_ctradd(uplo, transa,
alpha, A,
beta, B,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request);
plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
// Return status.
int status = sequence.status;
return status;
}
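// Usage sketch (illustrative, not part of the original file): compute
// B := 2*A^T + B on the upper trapezoid, assuming PLASMA has been initialized
// with plasma_init() and pA/pB hold column-major data:
//
//     plasma_complex32_t alpha = 2.0f;
//     plasma_complex32_t beta  = 1.0f;
//     int info = plasma_ctradd(PlasmaUpper, PlasmaTrans, m, n,
//                              alpha, pA, lda, beta, pB, ldb);
//     // info == PlasmaSuccess on successful exit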
/***************************************************************************//**
*
* @ingroup plasma_tradd
*
* Performs an addition of two trapezoidal matrices similarly to the
* pctradd() function from the PBLAS library. Non-blocking tile version of
* plasma_ctradd(). May return before the computation is finished. Operates
* on matrices stored by tiles. All matrices are passed through descriptors.
* All dimensions are taken from the descriptors. Allows for pipelining of
* operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the shape of op( A ) and B matrices:
* - PlasmaUpper: op( A ) and B are upper trapezoidal matrices.
* - PlasmaLower: op( A ) and B are lower trapezoidal matrices.
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^H
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check the
* sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_ctradd
* @sa plasma_omp_ctradd
* @sa plasma_omp_dtradd
* @sa plasma_omp_stradd
*
******************************************************************************/
void plasma_omp_ctradd(plasma_enum_t uplo, plasma_enum_t transa,
plasma_complex32_t alpha, plasma_desc_t A,
plasma_complex32_t beta, plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((transa != PlasmaNoTrans) &&
(transa != PlasmaTrans) &&
(transa != PlasmaConjTrans)) {
plasma_error("illegal value of transa");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
int am = transa == PlasmaNoTrans ? A.m : A.n;
if ((alpha == 0.0 || am == 0) && beta == 1.0)
return;
// Call parallel function.
plasma_pctradd(uplo, transa,
alpha, A,
beta, B,
sequence, request);
}
|
sparseOptimizedBlocksJacobi.h | //
// Created by mbarb on 23/02/2018.
//
#ifndef PARALLELITERATIVE_SPARSEOPTIMIZEDBLOCKSJACOBI_H
#define PARALLELITERATIVE_SPARSEOPTIMIZEDBLOCKSJACOBI_H
#include "Eigen"
#include "utils.h"
#include "sparseParallelJacobi.h"
namespace Iterative {
template <typename Scalar>
class sparseOptimizedBlocksJacobi : public sparseParallelJacobi<Scalar> {
public:
/**
*
* @param A linear system matrix
* @param b known term vector
* @param iterations max number of iterations
* @param tolerance min error tolerated
* @param workers number of threads
* @param blockSize size of the block
*/
explicit sparseOptimizedBlocksJacobi(
const Eigen::SparseMatrix<Scalar>& A,
const Eigen::ColumnVector<Scalar, Eigen::Dynamic>& b,
const ulonglong iterations,
const Scalar tolerance,
const ulong workers = 0L,
const ulonglong blockSize = 0L) :
sparseParallelJacobi<Scalar>::sparseParallelJacobi(A, b, iterations, tolerance, workers) {
this->blockSize = blockSize;
if (blockSize == 0)
this->blockSize = std::max(ulong(this->A.cols() / workers), (ulong)1L);
splitter();
}
        /**
         *
         * @return the computed solution vector (also stored in this->solution)
         */
const Eigen::ColumnVector<Scalar, Eigen::Dynamic> &solve() {
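            // Block-Jacobi sketch: with D_i the i-th diagonal block of A,
            //   x_{k+1}[i] = D_i^{-1} * ((b - A*x_k)[i] + D_i * x_k[i])
            // The D_i^{-1} are computed once below and reused at every
            // iteration; blocks whose update falls below the tolerance are
            // dropped from the work list.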
Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldSolution(this->solution);
std::vector<Eigen::Matrix<Scalar,Eigen::Dynamic,Eigen::Dynamic>> inverses(blocks.size());
Eigen::Matrix<Scalar,Eigen::Dynamic, Eigen::Dynamic> I(this->blockSize,this->blockSize);
Eigen::SimplicialLDLT<Eigen::SparseMatrix<Scalar>> solver;
I.setIdentity();
            // compute the inverses of the diagonal blocks and store them
#pragma omp parallel for firstprivate(I) private(solver)
for (int i = 0; i < blocks.size()-1; ++i) {
Eigen::SparseMatrix<Scalar> block = this->A.block(blocks[i].startCol, blocks[i].startRow, blocks[i].cols,
blocks[i].rows);
solver.compute(block);
inverses[i] = solver.solve(I);
}
{
Eigen::SparseMatrix<Scalar> block = this->A.block(blocks.back().startCol, blocks.back().startRow,
blocks.back().cols,blocks.back().rows);
if(block.cols()!=this->blockSize || block.rows()!=this->blockSize){
I.resize(block.rows(), block.cols());
I.setIdentity();
}
solver.compute(block);
inverses.back() = solver.solve(I);
}
// start iterations
std::vector<int> index;
Eigen::ColumnVector<Scalar, Eigen::Dynamic> Ax =
Eigen::ColumnVector<Scalar, Eigen::Dynamic>::Zero(this->solution.rows(),this->solution.cols());
for (this->iteration=0L; this->iteration < this->iterations; ++this->iteration) {
Ax = this->A*oldSolution;
#pragma omp parallel for schedule(dynamic)
for (auto i = 0; i < inverses.size(); ++i) {
                    // segment of the previous solution handled by this block
                    auto oldBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols);
auto block = this->solution.segment(blocks[i].startCol, blocks[i].cols);
// approximate the solution using the inverse and the solution at the previous iteration
Eigen::ColumnVector<Scalar,Eigen::Dynamic> correction =
Eigen::ColumnVector<Scalar,Eigen::Dynamic>::Zero(oldSolution.rows(), oldSolution.cols());
for (auto col = blocks[i].startCol; col < blocks[i].startCol+blocks[i].cols; ++col) {
correction+=this->A.col(col)*oldSolution[col];
}
block = inverses[i] * (this->b - Ax + correction).segment(blocks[i].startCol, blocks[i].cols);
if ((oldBlock - block).template lpNorm<1>() <= this->tolerance*block.size()) {
#pragma omp critical
index.emplace_back(i);
}
}
if (!index.empty()) {
std::sort(index.rbegin(), index.rend());
for (auto i : index) {
blocks.erase(blocks.begin() + i);
inverses.erase(inverses.begin() + i);
}
if (inverses.empty()) break;
index.clear();
}
std::swap(this->solution, oldSolution);
}
std::cout << this->iteration << std::endl;
return this->solution;
}
protected:
ulonglong blockSize;
std::vector<Index> blocks;
void splitter() {
for (ulonglong i = 0; i < this->A.cols(); i += blockSize) {
blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong)this->A.cols() - i),
i, std::min(blockSize, (ulonglong)this->A.rows() - i)));
}
}
private:
};
}
#endif //PARALLELITERATIVE_SPARSEOPTIMIZEDBLOCKSJACOBI_H
|
elemwise_binary_scalar_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_scalar_op.h
* \brief Function definition of elementwise binary scalar operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <utility>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"
namespace mxnet {
namespace op {
class BinaryScalarOp : public UnaryOp {
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
const double alpha = nnvm::get<double>(attrs.parsed);
CHECK_EQ(output.shape(), input.shape());
const int64_t row_count = output.shape()[0];
const int64_t items_per_row = output.shape().Size() / row_count;
const DType result_for_zero = OP::Map(DType(0), DType(alpha));
mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
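    // Rows absent from the row-sparse input are implicitly zero, so they all
    // evaluate to OP(0, alpha) == result_for_zero; only the stored rows need
    // the element-wise OP applied.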
if (sparse_row_count != row_count) {
mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
int64_t input_iter = 0;
int64_t output_row = 0;
IType next_input_row = 0;
while (output_row < row_count) {
next_input_row = input_iter < sparse_row_count ? int64_t(row_indexes[input_iter])
: row_count;
// Split up into blocks of contiguous data and do those together
// Do contiguous dense blocks
const int64_t dense_block_count = next_input_row - output_row;
if (dense_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
stream,
items_per_row * dense_block_count,
output_data.dptr_ + items_per_row * output_row,
result_for_zero);
});
output_row += dense_block_count;
continue;
}
// Do contiguous sparse blocks
int64_t next_non_contiguous_sparse = input_iter;
while (next_non_contiguous_sparse < sparse_row_count - 1) {
if (row_indexes[next_non_contiguous_sparse + 1]
!= row_indexes[next_non_contiguous_sparse] + 1) {
break;
}
++next_non_contiguous_sparse;
}
const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
if (sparse_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * sparse_block_count,
&output_data.dptr_[items_per_row * output_row],
&input_data.dptr_[items_per_row * input_iter],
DType(alpha));
});
output_row += sparse_block_count;
input_iter += sparse_block_count;
continue;
}
}
} else {
      // All rows are present, so the whole tensor can be processed at once
      // without consulting the row indices.
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * row_count,
output_data.dptr_,
input_data.dptr_,
DType(alpha));
});
}
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
CHECK_EQ(output.shape(), input.shape());
const double alpha = nnvm::get<double>(attrs.parsed);
const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
const TBlob column_indexes = input.aux_data(csr::kIdx);
const size_t item_count = column_indexes.Size();
// Pre-fill dense with 0-input/output value
FillDense<DType>(stream, output.shape().Size(), dense_fill_val,
req, output.data().dptr<DType>());
mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
if (item_count) {
const DType *in = input.data().dptr<DType>();
const IType *column_indexes_ptr = column_indexes.dptr<IType>();
const auto row_count = static_cast<size_t>(input.shape()[0]);
const TBlob row_starts = input.aux_data(csr::kIndPtr);
const CType *row_starts_ptr = row_starts.dptr<CType>();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(row_count); ++i) {
const bool last_row = i == static_cast<int>(row_count) - 1;
// Split up into blocks of contiguous data and do those together
const size_t row_item_start_iter = row_starts_ptr[i];
const size_t input_items_this_row = !last_row
? static_cast<size_t>(row_starts_ptr[i + 1])
- row_item_start_iter
: item_count - row_item_start_iter;
if (input_items_this_row) {
const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
const DType *row_data_start = in + row_item_start_iter;
DType *output_this_row = out[i].dptr_;
// More overhead to use OMP for small loops, so don't
if (input_items_this_row > 1000) {
#pragma omp parallel for
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
} else {
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
}
}
}
}
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
template<typename xpu, typename OP, typename DType, typename IType>
static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray output) {
mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
CHECK_EQ(output.storage_type(), kDefaultStorage);
switch (input.storage_type()) {
case kRowSparseStorage: {
ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
break;
}
case kCSRStorage: {
MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
});
break;
}
default:
CHECK(false) << "Unsupported sparse storage type";
break;
}
}
public:
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<bool>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else if (out_stype == kDefaultStorage &&
(in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
});
});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename OP>
static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename OP>
static void Backward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, xpu>::
Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
DType(alpha));
});
});
}
};
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser([](NodeAttrs* attrs) { \
attrs->parsed = std::stod(attrs->dict["scalar"]); \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<mxnet::alm::FChangeLayout>("FChangeLayout", ElemwiseChangeLayout) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.add_argument("data", "NDArray-or-Symbol", "source input") \
.add_argument("scalar", "float", "scalar input")
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
template<typename OP, int Req>
struct MissingRValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
template<typename OP, int Req>
struct MissingLValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
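  // (For example, with OP = mshadow_op::minus a missing rhs element yields
  // OP(lhs, 0) == lhs, while a missing lhs element yields OP(0, rhs) == -rhs.)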
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
* CPU-Only version
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
public:
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
private:
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
const DType *ograd_dptr = inputs[0].dptr<DType>();
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
template<
typename xpu,
typename LOP,
typename ROP,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
RspRspOp<LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
// lhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
}
// rhs grad
if (req[1] != kNullOp) {
RspRspOp<ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
// rhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
std::is_same<mshadow_op::left, ROP>::value;
CHECK(supported_ops)
<< "Only backward for mul is supported (LOP should be right, ROP should be left)";
const NDArray& out_grad = inputs[0];
const NDArray& lhs_in = inputs[1];
const NDArray& rhs_in = inputs[2];
const NDArray& lhs_grad = outputs[0];
const NDArray& rhs_grad = outputs[1];
const bool reverse = (outputs[0].storage_type() == kCSRStorage);
if (reverse) {
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()}, {req[1]},
{rhs_grad.data()});
} else {
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()}, {req[0]},
{lhs_grad.data()});
}
}
public:
  /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
  /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the binary inputs to be dense and still produce a sparse output.
* Typically used for sparse * dense = sparse.
   * Note: for csr inputs it dispatches to the dense fallback, except for the csr, csr -> csr case
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
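  // A sketch of how the branches above resolve on a CPU context (inferred
  // from the code, not normative):
  //   (dns, dns) -> dns via kFCompute
  //   (rsp, rsp) -> rsp via kFComputeEx
  //   (csr, csr) -> csr via kFComputeEx
  //   (rsp, dns) / (dns, rsp) -> rsp via kFComputeEx
  //   (csr, dns) / (dns, csr) -> csr via kFComputeEx
  // Anything else falls through to dispatch_fallback.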
/*!
   * \brief Allow one of the inputs to be dense and produce a dense output;
   *        rsp inputs are supported only when both inputs are rsp.
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool cpu_only, bool rsp, bool csr>
static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2);
CHECK_EQ(out_attrs->size(), 1);
const auto lhs_stype = (*in_attrs)[0];
const auto rhs_stype = (*in_attrs)[1];
bool dispatched = false;
const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns ... -> dns
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp, ... -> rsp
dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr, ... -> csr
dispatched = storage_type_assign(out_attrs, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
(lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
// dense, csr -> dense / csr, dense -> dense
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
// dense, rsp -> dense / rsp, dense -> dense
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
    if (!dispatched) {
      dispatched = dispatch_fallback(out_attrs, dispatch_mode);
    }
    return dispatched;
}
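  // Contrast with PreferSparseStorageType above (again only a sketch): with
  // rsp = csr = true on CPU, mixed pairs produce a dense output,
  //   (dns, csr) / (csr, dns) -> dns and (dns, rsp) / (rsp, dns) -> dns,
  // while same-storage pairs keep their storage:
  //   (rsp, rsp) -> rsp and (csr, csr) -> csr.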
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
template<typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
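  // The 'size' computation in these kernels is a ceiling division turning an
  // element count into a packed-vector count: with DataType<DType>::kLanes
  // == 2 (e.g. half2 packing) and 5 elements, size = (5 + 2 - 1) / 2 = 3
  // launch items; with the common kLanes == 1 it is simply the element count.
  // (Worked example; the exact lane count depends on DType.)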
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
if (outputs[0].type_flag_ == mshadow::kBool) {
LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
}
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeWithBool(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<bool>(),
inputs[0].dptr<DType>(),
inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
(out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
RspRspOp<OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
} else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kDefaultStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
out_stype == kDefaultStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kRowSparseStorage);
const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
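  // Branch summary for ComputeEx above (a reading aid, not a contract):
  //   rsp op rsp -> rsp/dns             : RspRspOp
  //   csr op csr -> csr                 : CsrCsrOp
  //   dns/csr mix -> dns (either order) : DnsCsrDnsOp, reverse = (lhs is csr)
  //   dns/rsp mix -> dns (either order) : DnsRspDnsOp, reverse = (lhs is rsp)
  //   anything else                     : LogUnimplementedOp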
/*! \brief ComputeEx allowing dense lvalue and/or rvalue */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
lhs_may_be_dense && rhs_may_be_dense) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// rsp, dns -> rsp
// dns, rsp -> rsp
      // More than one dense input is not allowed (this is checked in RspRspOp):
// rsp, dns -> dns <-- NOT ALLOWED
// dns, rsp -> dns <-- NOT ALLOWED
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
RspRspOp<OP>(
s, attrs, ctx, inputs[0], inputs[1],
req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kCSRStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 1U); // output grad
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto in_stype = inputs[0].storage_type();
const auto lhs_stype = outputs[0].storage_type();
const auto rhs_stype = outputs[1].storage_type();
// lhs grad
if (req[0] != kNullOp) {
if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> rsp, _. The op must map a zero input to a zero output
DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
// rhs grad
if (req[1] != kNullOp) {
      if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        CHECK_EQ(outputs[1].storage_type(), in_stype);
        // rsp -> _, rsp. The op must map a zero input to a zero output
        DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
}
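  // The DCHECKs above encode the requirement that LOP/ROP map zero to zero,
  // so a sparse output gradient yields sparse input gradients without
  // densification. Illustrative case (assuming, as in the stock add
  // registration, that both backward ops are identity): identity(0) == 0, so
  // an rsp out_grad produces rsp lhs/rhs grads.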
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto out_grad_stype = inputs[0].storage_type();
const auto lhs_grad_stype = outputs[0].storage_type();
const auto rhs_grad_stype = outputs[1].storage_type();
if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
(lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
(rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
}
if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
(lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
out_grad_stype == kDefaultStorage) {
// dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
}
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
    When the inputs mix sparse and dense, a sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferSparseStorageType) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result
* FInferStorageType attr is not set using this macro.
* By default DefaultStorageType is used.
*/
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
test_multi_tasks_common.h | /* This file is part of the Tomographer project, which is distributed under the
* terms of the MIT license.
*
* The MIT License (MIT)
*
* Copyright (c) 2016 ETH Zurich, Institute for Theoretical Physics, Philippe Faist
* Copyright (c) 2017 Caltech, Institute for Quantum Information and Matter, Philippe Faist
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef TEST_MULTI_TASKS_COMMON_H
#define TEST_MULTI_TASKS_COMMON_H
#include <string>
#include <functional>
#include <chrono>
#include <tomographer/tools/fmt.h>
// don't include multiprocomp.h, because when compiling test_multiproc I don't want to
// have included multiprocomp.h (be sure that there is no symbol pollution)
#ifdef TOMOGRAPHER_USE_WINDOWS_SLEEP
// use MS Windows' Sleep() function
#include <windows.h>
#define TOMOGRAPHERTESTS_SLEEP_FOR_MS(x) Sleep((x))
#else
// normal C++11 API function, not available on mingw32 w/ win threads
#include <thread>
#define TOMOGRAPHERTESTS_SLEEP_FOR_MS(x) \
std::this_thread::sleep_for(std::chrono::milliseconds((x)))
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
typedef
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && !defined(__clang__)
std::chrono::monotonic_clock // for g++ 4.6
#else
std::chrono::steady_clock
#endif
StdClockType;
int UnitTimeSleepMs = 300;
struct MyTaskInput {
MyTaskInput(int a_ = 0, int b_ = 0) : a(a_), b(b_) { }
int a;
int b;
};
struct TestBasicCData {
TestBasicCData(int c_) : c(c_), inputs() { }
int c;
std::vector<MyTaskInput> inputs;
template<typename IntType>
MyTaskInput getTaskInput(IntType k) const {
return inputs[(std::size_t)k];
}
};
struct TestTaskResultType {
TestTaskResultType(int value_) : msg(), value(value_) { }
TestTaskResultType(int value_, std::string msg_) : msg(msg_), value(value_) { }
  TestTaskResultType(TestTaskResultType && ) = default; // expressly allow move construction
std::string msg;
int value;
// test that the multiproc implementation does not depend on these
TestTaskResultType(const TestTaskResultType & ) = delete;
TestTaskResultType & operator=(const TestTaskResultType & ) = delete;
TestTaskResultType explicitCopy() const { return TestTaskResultType(value, msg); }
};
template<typename TestCDataType, typename ResultType_ = TestTaskResultType>
struct TestTaskBase {
//
// A very simple task. The task is to calculate the sum of two inputs, "a" and "b", and
// multiply the result by some common number "c" stored in TestBasicCData.
//
typedef MyTaskInput Input;
typedef Tomographer::MultiProc::TaskStatusReport StatusReportType;
typedef ResultType_ ResultType;
template<typename LoggerType>
TestTaskBase(Input input, const TestCDataType * , LoggerType & logger)
: _input(input), _result(-1)
{
logger.debug("TestTask::TestTask", "constructor called") ;
}
template<typename LoggerType, typename TaskManagerIface>
void run(const TestCDataType * pcdata, LoggerType & logger, TaskManagerIface * )
{
//BOOST_MESSAGE("Running task.") ; // BOOST_TEST_MESSAGE may not be thread-safe!!!!!
logger.info("TestTask::run", "Running task.") ;
logger.debug("TestTask::run", "running task.");
_result.value = ( _input.a + _input.b ) * pcdata->c ;
_result.msg = Tomographer::Tools::fmts("((a=%d)+(b=%d))*(c=%d) == %d",
_input.a, _input.b, pcdata->c, _result.value);
logger.info("TestTask::run", "Task finished.") ;
// BOOST_MESSAGE("Task finished.") ;
}
inline ResultType getResult() const { return _result.explicitCopy(); }
inline ResultType stealResult() { return std::move(_result); }
Input _input;
ResultType _result;
};
typedef TestTaskBase<TestBasicCData> TestTask;
// http://stackoverflow.com/a/23036970/1694896
template<typename T>
class mkvec {
public:
typedef mkvec<T> my_type;
my_type& operator<< (const T& val) {
data_.push_back(val);
return *this;
}
my_type& operator<< (const std::vector<T>& inVector) {
this->data_.reserve(this->data_.size() + inVector.size());
this->data_.insert(this->data_.end(), inVector.begin(), inVector.end());
return *this;
}
operator std::vector<T>() const {
return data_;
}
private:
std::vector<T> data_;
};
struct test_task_dispatcher_fixture {
TestBasicCData cData;
const int num_runs;
const std::vector<int> correct_result_values;
test_task_dispatcher_fixture()
: cData(1000),
num_runs(10),
correct_result_values(mkvec<int>()
<<3000
<<30000
<<3000
<<9000
<<3000
<<20000
<<3000
<<3000
<<17000
<<3000)
{
cData.inputs = mkvec<MyTaskInput>()
<< MyTaskInput(1, 2)
<< MyTaskInput(10, 20)
<< MyTaskInput(1, 2)
<< MyTaskInput(4, 5)
<< MyTaskInput(1, 2)
<< MyTaskInput(-1, 21)
<< MyTaskInput(1, 2)
<< MyTaskInput(1, 2)
<< MyTaskInput(8, 9)
<< MyTaskInput(1, 2);
}
void check_correct_results(const std::vector<TestTask::ResultType*> & results)
{
for (std::size_t k = 0; k < correct_result_values.size(); ++k) {
BOOST_CHECK_EQUAL(results[k]->value, correct_result_values[k]) ;
}
}
template<typename TaskDispatcherType>
void check_correct_results_collected(const TaskDispatcherType & task_dispatcher)
{
// collectedTaskResults()
const std::vector<TestTask::ResultType*> results = task_dispatcher.collectedTaskResults();
check_correct_results(results);
// numTaskRuns()
BOOST_CHECK_EQUAL(results.size(), (std::size_t)task_dispatcher.numTaskRuns());
// collectedTaskResult(k)
const std::size_t N = (std::size_t)task_dispatcher.numTaskRuns();
for (std::size_t k = 0; k < N; ++k) {
BOOST_CHECK_EQUAL(task_dispatcher.collectedTaskResult(k).value, correct_result_values[k]) ;
}
}
};
//
// Utilities for testing the status reporting mechanism of a task dispatcher
//
#ifndef __MINGW32__
// MinGW32 does not have SIGALRM / alarm()
std::function<void()> sigalarm_act;
void sigalarm_act_cfn(int signum)
{
// printf("[SIGALRM]\n");
if (signum == SIGALRM) {
sigalarm_act();
}
}
#endif
struct StatusRepTestBasicCData {
StatusRepTestBasicCData() : UnitTimeSleepMs_(UnitTimeSleepMs) { }
int UnitTimeSleepMs_;
template<typename IntType>
int getTaskInput(IntType k) const {
return 2*UnitTimeSleepMs_ + (int)(k/3)*UnitTimeSleepMs_;
}
};
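// Worked example for getTaskInput() above: with UnitTimeSleepMs_ = 300,
// tasks k = 0..5 run for 2*300 + (k/3)*300 ms, i.e. 600, 600, 600, 900, 900,
// 900 ms -- the integer division groups the tasks in threes.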
struct StatusRepTestTask {
typedef Tomographer::MultiProc::TaskStatusReport StatusReportType;
typedef bool ResultType;
template<typename LoggerType>
StatusRepTestTask(int input, const StatusRepTestBasicCData * , LoggerType & logger)
: _input(input) // input is task number
{
logger.debug("StatsRepTestTask constructor", "Task will run for %d ms", input);
}
template<typename LoggerType, typename TaskManagerIface>
void run(const StatusRepTestBasicCData * , LoggerType & logger, TaskManagerIface * iface)
{
_result = false;
    // Check for status reports, and generate one whenever requested. Run the
    // task like this for the number of milliseconds given by the input.
unsigned long count = 0;
StdClockType::time_point time_start = StdClockType::now();
int elapsed_ms = 0;
int ms_to_run = _input;
do {
elapsed_ms = (int)std::chrono::duration_cast<std::chrono::milliseconds>(StdClockType::now() - time_start).count();
if (iface->statusReportRequested()) {
logger.longdebug("StatusRepTestTask::run", "Task input=%02d: Status report requested", _input);
StatusReportType s(elapsed_ms / (double)ms_to_run,
Tomographer::Tools::fmts("elapsed = %d [%.2f%%]; count = %lu = %#lx",
elapsed_ms, 100.0*elapsed_ms/ms_to_run, count, count));
logger.longdebug("StatusRepTestTask::run", "s.msg = %s", s.msg.c_str());
iface->submitStatusReport(s);
logger.longdebug("StatusRepTestTask::run", "report submitted.");
_result = true;
}
++count;
} while (elapsed_ms < ms_to_run);
}
ResultType getResult() const { return _result; }
ResultType stealResult() const { return getResult(); }
private:
int _input;
ResultType _result;
};
void (*global_sig_handler_sigalarm_act)(int) = NULL;
void * global_sig_handler_sigalarm_task_dispatcher = NULL;
struct test_task_dispatcher_status_reporting_fixture {
StatusRepTestBasicCData cData;
const int num_runs;
test_task_dispatcher_status_reporting_fixture()
: cData(), num_runs(6)
{
}
template<typename TaskDispatcher, typename LoggerType>
inline void set_report_handler(TaskDispatcher& task_dispatcher, LoggerType & logger)
{
auto plogger = &logger; // see http://stackoverflow.com/q/21443023/1694896
task_dispatcher.setStatusReportHandler(
[plogger](const typename TaskDispatcher::FullStatusReportType& r) {
plogger->info("status_report test case", [&](std::ostream & stream) {
stream << "Full status report recieved. num_completed = " << r.num_completed
<< ", num_total_runs = " << r.num_total_runs << "\n";
for (std::size_t k = 0; k < r.workers_running.size(); ++k) {
if (!r.workers_running[k]) {
stream << "Worker #" << k << " idle\n";
} else {
stream << "Worker #" << k << ": " << r.workers_reports[k].fraction_done * 100 << "%, "
<< r.workers_reports[k].msg << "\n";
}
}
});
});
}
template<typename TaskDispatcher, typename LoggerType>
inline void perform_test_status_report_periodic(TaskDispatcher & task_dispatcher, LoggerType & logger)
{
logger.info("test case:status_report_periodic", "Starting test case.");
set_report_handler(task_dispatcher, logger);
task_dispatcher.requestPeriodicStatusReport(UnitTimeSleepMs); // every unit time
task_dispatcher.run();
for ( auto res : task_dispatcher.collectedTaskResults() ) {
BOOST_CHECK( res );
}
logger.info("test case:status_report_periodic", "Test case done.");
}
template<typename TaskDispatcher, typename LoggerType>
inline void perform_test_interrupt_tasks_withthread(TaskDispatcher & task_dispatcher, LoggerType & logger)
{
#ifdef _OPENMP
logger.info("test case:interrupt_tasks_withthread", "Starting test case.");
set_report_handler(task_dispatcher, logger);
omp_set_dynamic(0);
omp_set_nested(1);
bool tasks_interrupted = false;
auto starttime = StdClockType::now();
#pragma omp parallel num_threads(2) default(shared)
{
if (omp_get_thread_num() == 0) {
// take care of sending the interrupt request
TOMOGRAPHERTESTS_SLEEP_FOR_MS(UnitTimeSleepMs/2);
task_dispatcher.requestInterrupt();
} else if (omp_get_thread_num() == 1) {
// run the slave tasks
try {
task_dispatcher.run();
} catch (const Tomographer::MultiProc::TasksInterruptedException & e) {
tasks_interrupted = true;
}
} else {
// never here
assert( false ) ;
}
}
auto endtime = StdClockType::now();
#pragma omp critical
{
BOOST_CHECK(tasks_interrupted);
}
logger.debug("test case:interrupt_tasks_withthread", [&](std::ostream & stream) {
stream << "Tasks (hopefully) interrupted after "
<< std::chrono::duration_cast<std::chrono::seconds>(endtime-starttime).count()
<< " seconds.";
});
logger.info("test case:interrupt_tasks_withthread", "Test case done.");
#else // _OPENMP
(void)task_dispatcher; (void)logger;
BOOST_TEST_MESSAGE("test case interrupt_tasks_withthread: nothing to do because OpenMP is disabled");
BOOST_CHECK( true ) ; // dummy test case
#endif
}
template<typename TaskDispatcher, typename LoggerType>
inline void perform_test_status_report_withthread(TaskDispatcher & task_dispatcher, LoggerType & logger)
{
#ifdef _OPENMP
logger.info("test case:status_report_withthread", "Starting test case.");
set_report_handler(task_dispatcher, logger);
omp_set_dynamic(0);
omp_set_nested(1);
volatile std::sig_atomic_t finished = 0;
#pragma omp parallel num_threads(2) default(shared)
{
if (omp_get_thread_num() == 0) {
// take care of sending status report requests
while (!finished) {
TOMOGRAPHERTESTS_SLEEP_FOR_MS(UnitTimeSleepMs);
task_dispatcher.requestStatusReport();
}
} else if (omp_get_thread_num() == 1) {
// run the slave tasks
task_dispatcher.run();
finished = 1;
for ( auto res : task_dispatcher.collectedTaskResults() ) {
BOOST_CHECK( res );
}
} else {
// never here
assert( false ) ;
}
}
logger.debug("test case:status_report_withthread", "Test case done.");
#else // _OPENMP
(void)task_dispatcher; (void)logger;
BOOST_TEST_MESSAGE("test case status_report_withthread: nothing to do because OpenMP is disabled");
BOOST_CHECK( true ) ; // dummy test case
#endif
}
template<typename TaskDispatcher, typename LoggerType>
inline void perform_test_status_report_withsigalrm(TaskDispatcher & task_dispatcher, LoggerType & logger)
{
#ifndef __MINGW32__ // MinGW32 does not have SIGALRM / alarm()
logger.info("test case:status_report_withsigalrm", "Starting test case.");
    // alarm() only accepts whole seconds, so make one second our time-step unit
cData.UnitTimeSleepMs_ = 1000;
set_report_handler(task_dispatcher, logger);
{
auto finally = Tomographer::Tools::finally([](){
alarm(0);
signal(SIGALRM, SIG_DFL);
});
global_sig_handler_sigalarm_task_dispatcher = (void*) &task_dispatcher;
// use pure C function, not std::function/captured lambdas/etc. because
// this is a signal we're in, the interrupt can literally come at any
// point in the code and we must make sure to use only signal-reentrant
// functions
global_sig_handler_sigalarm_act = +[](int) {
// safe to be called from within a signal handler
((TaskDispatcher*)global_sig_handler_sigalarm_task_dispatcher)->requestStatusReport();
alarm(1);
signal(SIGALRM, global_sig_handler_sigalarm_act);
};
alarm(1);
signal(SIGALRM, global_sig_handler_sigalarm_act);
task_dispatcher.run();
for ( auto res : task_dispatcher.collectedTaskResults() ) {
BOOST_CHECK( res );
}
}
logger.info("test case:status_report_withsigalrm", "Test case done.");
#else // MINGW
(void)task_dispatcher; (void)logger;
BOOST_TEST_MESSAGE("test case status_report_withsigalrm: nothing to do because signal/alarm is not supported");
BOOST_CHECK( true ) ; // dummy test case
#if !defined(_OPENMP)
// but on MINGW & !OPENMP, we have no way of testing status report checking ... so fail here
BOOST_CHECK(false && "Status report check NOT IMPLEMENTED on your platform, sorry");
#endif // at least some form of status report checked
#endif // MINGW
}
};
#endif
|
amuxCRS.h | /**
* Copyright (c) 2012, OpenGeoSys Community (http://www.opengeosys.com)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.com/LICENSE.txt
*
*
* \file amuxCRS.h
*
* Created on 2011-09-20 by Thomas Fischer
*/
#ifndef AMUXCRS_H
#define AMUXCRS_H
namespace MathLib {
template<typename FP_TYPE, typename IDX_TYPE>
void amuxCRS(FP_TYPE a, IDX_TYPE n, IDX_TYPE const * const iA, IDX_TYPE const * const jA,
FP_TYPE const * const A, FP_TYPE const * const x, FP_TYPE* y)
{
for (IDX_TYPE i(0); i < n; i++) {
const IDX_TYPE end(iA[i + 1]);
y[i] = A[iA[i]] * x[jA[iA[i]]];
for (IDX_TYPE j(iA[i]+1); j < end; j++) {
y[i] += A[j] * x[jA[j]];
}
y[i] *= a;
}
}
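// Usage sketch for amuxCRS above (hypothetical 2x2 CRS matrix [[1,2],[0,3]]):
//   unsigned iA[] = {0, 2, 3};       // row pointers
//   unsigned jA[] = {0, 1, 1};       // column indices
//   double   A[]  = {1.0, 2.0, 3.0}; // values
//   double   x[]  = {1.0, 1.0}, y[2];
//   MathLib::amuxCRS(1.0, 2u, iA, jA, A, x, y); // y == {3.0, 3.0}
// Note that y[i] = A[iA[i]] * x[jA[iA[i]]] assumes every row has at least
// one entry.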
void amuxCRSParallelPThreads (double a,
unsigned n, unsigned const * const iA, unsigned const * const jA,
double const * const A, double const * const x, double* y,
unsigned num_of_pthreads);
#ifdef _OPENMP
template<typename FP_TYPE, typename IDX_TYPE>
void amuxCRSParallelOpenMP (FP_TYPE a,
unsigned n, IDX_TYPE const * const __restrict__ iA, IDX_TYPE const * const __restrict__ jA,
FP_TYPE const * const A, FP_TYPE const * const __restrict__ x, FP_TYPE* __restrict__ y)
{
OPENMP_LOOP_TYPE i;
{
#pragma omp parallel for
for (i = 0; i < n; i++) {
const IDX_TYPE end(iA[i + 1]);
y[i] = A[iA[i]] * x[jA[iA[i]]];
for (IDX_TYPE j(iA[i]+1); j < end; j++) {
y[i] += A[j] * x[jA[j]];
}
y[i] *= a;
}
}
}
#endif
void amuxCRSSym (double a,
unsigned n, unsigned const * const iA, unsigned const * const jA,
double const * const A, double const * const x, double* y);
} // end namespace MathLib
#endif
|
GB_unop__lnot_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_fp64_fp64
// op(A') function: GB_unop_tran__lnot_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__lnot_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = !(z != 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__lnot_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
stacks.c | // -*-Mode: C++;-*- // technically C99
// * BeginRiceCopyright *****************************************************
//
// $HeadURL$
// $Id$
//
// --------------------------------------------------------------------------
// Part of HPCToolkit (hpctoolkit.org)
//
// Information about sources of support for research and development of
// HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'.
// --------------------------------------------------------------------------
//
// Copyright ((c)) 2002-2021, Rice University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Rice University (RICE) nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// This software is provided by RICE and contributors "as is" and any
// express or implied warranties, including, but not limited to, the
// implied warranties of merchantability and fitness for a particular
// purpose are disclaimed. In no event shall RICE or contributors be
// liable for any direct, indirect, incidental, special, exemplary, or
// consequential damages (including, but not limited to, procurement of
// substitute goods or services; loss of use, data, or profits; or
// business interruption) however caused and on any theory of liability,
// whether in contract, strict liability, or tort (including negligence
// or otherwise) arising in any way out of the use of this software, even
// if advised of the possibility of such damage.
//
// ******************************************************* EndRiceCopyright *
//*****************************************************************************
// local includes
//*****************************************************************************
#include "stacks.h"
//*****************************************************************************
// interface functions
//*****************************************************************************
#define Ad(q) q.aptr
#define Ap(q) q->aptr
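// Ad(q) names the atomic 'aptr' field of a struct value q; Ap(q) names the
// same field through a pointer q. They only shorten the atomic_* calls below.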
void
sstack_ptr_set
(
s_element_ptr_t *p,
s_element_t *v
)
{
atomic_store_explicit(&Ap(p), v, memory_order_relaxed);
}
s_element_t *
sstack_ptr_get
(
s_element_ptr_t *e
)
{
return (s_element_t *) atomic_load_explicit(&Ap(e), memory_order_relaxed);
}
s_element_t *
sstack_swap
(
s_element_ptr_t *q,
s_element_t *r
)
{
return (s_element_t *) atomic_exchange_explicit(&Ap(q), r, memory_order_relaxed);
}
void
sstack_push
(
s_element_ptr_t *q,
s_element_t *e
)
{
s_element_t *first =
(s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
atomic_store_explicit(&(e->Ad(next)), first, memory_order_relaxed);
atomic_store_explicit(&Ap(q), e, memory_order_relaxed);
}
s_element_t *
sstack_pop
(
s_element_ptr_t *q
)
{
s_element_t *e = (s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
if (e) {
s_element_t *next =
(s_element_t *) atomic_load_explicit(&(e->Ad(next)), memory_order_relaxed);
atomic_store_explicit(&Ap(q), next, memory_order_relaxed);
atomic_store_explicit(&(e->Ad(next)), 0, memory_order_relaxed);
}
return e;
}
s_element_t *
sstack_steal
(
s_element_ptr_t *q
)
{
s_element_t *e = sstack_swap(q, 0);
return e;
}
void
sstack_reverse
(
s_element_ptr_t *q
)
{
s_element_t *prev = NULL;
s_element_t *e = (s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
while (e) {
s_element_t *next =
(s_element_t *) atomic_load_explicit(&(e->Ad(next)), memory_order_relaxed);
atomic_store_explicit(&(e->Ad(next)), prev, memory_order_relaxed);
prev = e;
e = next;
}
atomic_store_explicit(&Ap(q), prev, memory_order_relaxed);
}
void
sstack_forall
(
s_element_ptr_t *q,
stack_forall_fn_t fn,
void *arg
)
{
s_element_t *current =
(s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
while (current) {
fn(current, arg);
current =
      (s_element_t *) atomic_load_explicit(&current->Ad(next), memory_order_relaxed);
}
}
void
cstack_ptr_set
(
s_element_ptr_t *e,
s_element_t *v
)
{
  atomic_init(&Ap(e), v);
}
s_element_t *
cstack_ptr_get
(
s_element_ptr_t *e
)
{
return (s_element_t *) atomic_load(&Ap(e));
}
s_element_t *
cstack_swap
(
s_element_ptr_t *q,
s_element_t *r
)
{
s_element_t *e = (s_element_t *) atomic_exchange(&Ap(q), r);
return e;
}
void
cstack_push
(
s_element_ptr_t *q,
s_element_t *e
)
{
s_element_t *head = (s_element_t *) atomic_load(&Ap(q));
s_element_t *new_head = e;
// push a singleton or a chain on the list
for (;;) {
s_element_t *enext = (s_element_t *) atomic_load(&e->Ad(next));
if (enext == 0) break;
e = enext;
}
do {
atomic_store(&e->Ad(next), head);
} while (!atomic_compare_exchange_strong(&Ap(q), &head, new_head));
}
s_element_t *
cstack_pop
(
s_element_ptr_t *q
)
{
s_element_t *oldhead = (s_element_t *) atomic_load(&Ap(q));
s_element_t *next = 0;
do {
if (oldhead == 0) return 0;
next = (s_element_t *) atomic_load(&oldhead->Ad(next));
} while (!atomic_compare_exchange_strong(&Ap(q), &oldhead, next));
atomic_store(&oldhead->Ad(next), 0);
return oldhead;
}
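// Note on the CAS loops in cstack_push/cstack_pop above: this is a classic
// Treiber stack. atomic_compare_exchange_strong reloads 'head' (or 'oldhead')
// on failure, so each iteration retries against the fresh head. As with any
// Treiber stack, pop is ABA-prone if popped nodes can be freed and recycled
// concurrently -- an observation about the pattern, not a claim about how
// callers use it here.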
s_element_t *
cstack_steal
(
s_element_ptr_t *q
)
{
s_element_t *e = cstack_swap(q, 0);
return e;
}
void
cstack_forall
(
s_element_ptr_t *q,
stack_forall_fn_t fn,
void *arg
)
{
s_element_t *current = (s_element_t *) atomic_load(&Ap(q));
while (current) {
fn(current, arg);
    current = (s_element_t *) atomic_load(&current->Ad(next));
}
}
//*****************************************************************************
// unit test
//*****************************************************************************
#define UNIT_TEST 0
#if UNIT_TEST
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
typedef struct {
s_element_ptr_t next;
int value;
} typed_stack_elem(int); // int_q_element_t
typed_stack_declare_type(int);
typed_stack_impl(int, cstack);
typed_stack_elem_ptr(int) queue;
void
print(typed_stack_elem(int) *e, void *arg)
{
printf("%d\n", e->value);
}
int main(int argc, char **argv)
{
int i;
for (i = 0; i < 10; i++) {
typed_stack_elem_ptr(int)
item = (typed_stack_elem_ptr(int)) malloc(sizeof(typed_stack_elem(int)));
item->value = i;
typed_stack_elem_ptr_set(int, cstack)(item, 0);
typed_stack_push(int, cstack)(&queue, item);
}
typed_stack_forall(int, cstack)(&queue, print, 0);
}
#endif
#if 0
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
typedef struct {
s_element_ptr_t next;
int value;
} typed_stack_elem(int); // int_q_element_t
typed_stack_elem_ptr(int) queue;
#define qtype cstack
typed_stack(int, qtype)
typed_stack_elem(int) *
typed_stack_elem_fn(int,new)(int value)
{
typed_stack_elem(int) *e =
(typed_stack_elem(int) *) malloc(sizeof(int_s_element_t));
e->value = value;
  typed_stack_elem_ptr_set(int, qtype)(&e->next, 0);
  return e;
}
void
pop
(
int n
)
{
int i;
for(i = 0; i < n; i++) {
typed_stack_elem(int) *e = typed_stack_pop(int, qtype)(&queue);
if (e == 0) {
printf("%d queue empty\n", omp_get_thread_num());
break;
} else {
printf("%d popping %d\n", omp_get_thread_num(), e->value);
}
}
}
void
push
(
int min,
int n
)
{
int i;
for(i = min; i < min+n; i++) {
printf("%d pushing %d\n", omp_get_thread_num(), i);
typed_stack_push(int, qtype)(&queue, typed_stack_elem_fn(int, new)(i));
}
}
void
dump
(
int_s_element_t *e
)
{
int i;
for(; e;
e = (int_s_element_t *) typed_stack_elem_ptr_get(int,qtype)(&e->next)) {
printf("%d stole %d\n", omp_get_thread_num(), e->value);
}
}
int
main
(
int argc,
char **argv
)
{
typed_stack_elem_ptr_set(int, qtype)(&queue, 0);
#pragma omp parallel
{
push(0, 30);
pop(10);
push(100, 12);
// pop(100);
int_s_element_t *e = typed_stack_steal(int, qtype)(&queue);
dump(e);
push(300, 30);
typed_stack_push(int, qtype)(&queue, e);
pop(100);
}
}
#endif
|
CSC.h | #ifndef _CSC_H_
#define _CSC_H_
#include "Deleter.h"
#include "HeapEntry.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <tuple>
#include <vector>
#include <random>
#include "BitMap.h"
#include "utility.h"
#include <numeric>
#include "Triple.h"
extern "C" {
#include "GTgraph/R-MAT/graph.h"
}
using namespace std;
template <class IT,
          class NT> // IT, NT are supplied by the client (parametric polymorphism)
class CSC {
public:
CSC() : nnz(0), rows(0), cols(0) {}
  CSC(IT mynnz, IT m, IT n, int nt)
      : nnz(mynnz), rows(m), cols(n) // sizing constructor
  {
    // Constructing empty CSC objects (size = 0) is not allowed.
    assert(nnz != 0 && cols != 0);
colptr = my_malloc<IT>(cols + 1);
rowids = my_malloc<IT>(nnz);
values = my_malloc<NT>(nnz);
}
  CSC(Triple<IT, NT> *triples, IT mynnz, IT m,
      IT n); // constructor from an array of triples
  CSC(std::vector<std::pair<int64_t, int64_t>> edges, IT mynnz, IT m, IT n);
  CSC(IT scale, IT r_scale, IT r_edgefactor); // for tall-skinny matrices
void make_empty() {
if (nnz > 0) {
my_free<IT>(rowids);
my_free<NT>(values);
nnz = 0;
}
if (cols > 0) {
my_free<IT>(colptr);
cols = 0;
}
rows = 0;
}
template <typename AddOperation>
CSC(vector<tuple<IT, IT, NT>> &tuple, IT m, IT n,
      AddOperation addop); // constructor
template <typename AddOperation>
void MergeDuplicates(AddOperation addop); // 1st method
CSC(graph &G);
CSC(IT *ri, IT *ci, NT *val, IT mynnz, IT m, IT n);
CSC(const CSC<IT, NT> &rhs); // copy constructor
CSC<IT, NT> &operator=(const CSC<IT, NT> &rhs); // assignment operator
  bool operator==(const CSC<IT, NT> &rhs); // operator== overload
  ~CSC() // destructor
{
make_empty();
}
bool isEmpty() { return (nnz == 0); }
void Sorted();
void shuffleIds();
CSC<IT, NT> SpRef(const vector<IT> &ri, const vector<IT> &ci);
CSC<IT, NT> SpRef1(const vector<IT> &ri, const vector<IT> &ci);
CSC<IT, NT> SpRef2(const IT *ri, const IT rilen, const IT *ci,
const IT cilen);
void intersect(const IT *rowids_in, const NT *values_in, const IT len_in,
const IT *ri, const IT len_ri, IT *rowids_out, NT *values_out,
IT *len_out);
IT rows;
IT cols;
IT nnz; // number of nonzeros
IT totalcols; // for the parallel case
IT *colptr;
IT *rowids;
NT *values;
};
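// Usage sketch (hypothetical 2x2 matrix with 3 nonzeros, via the
// parallel-array constructor declared above):
//   unsigned ri[] = {0, 1, 1};
//   unsigned ci[] = {0, 0, 1};
//   double   v[]  = {1.0, 2.0, 3.0};
//   CSC<unsigned, double> A(ri, ci, v, 3, 2, 2); // nnz = 3, rows = 2, cols = 2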
// copy constructor
template <class IT, class NT>
CSC<IT, NT>::CSC(const CSC<IT, NT> &rhs)
: nnz(rhs.nnz), rows(rhs.rows), cols(rhs.cols) {
if (nnz > 0) {
values = my_malloc<NT>(nnz);
rowids = my_malloc<IT>(nnz);
copy(rhs.values, rhs.values + nnz, values);
copy(rhs.rowids, rhs.rowids + nnz, rowids);
}
if (cols > 0) {
colptr = my_malloc<IT>(cols + 1);
copy(rhs.colptr, rhs.colptr + cols + 1, colptr);
}
}
template <class IT, class NT>
CSC<IT, NT> &CSC<IT, NT>::
operator=(const CSC<IT, NT> &rhs) // assignment operator overload
{
if (this != &rhs) {
if (nnz > 0) // if the existing object is not empty
{
my_free<IT>(rowids);
my_free<NT>(values);
}
if (cols > 0) {
my_free<IT>(colptr);
}
nnz = rhs.nnz;
rows = rhs.rows;
cols = rhs.cols;
if (rhs.nnz > 0) // if the copied object is not empty
{
values = my_malloc<NT>(nnz);
rowids = my_malloc<IT>(nnz);
copy(rhs.values, rhs.values + nnz, values);
copy(rhs.rowids, rhs.rowids + nnz, rowids);
}
if (rhs.cols > 0) {
colptr = my_malloc<IT>(cols + 1);
copy(rhs.colptr, rhs.colptr + cols + 1, colptr);
}
}
return *this;
}
//! Construct a CSC object from a GTgraph object
//! GTgraph might have parallel edges; this constructor sums them up
//! Assumes a sorted GTgraph (primary key: start)
template <class IT, class NT>
CSC<IT, NT>::CSC(graph &G) : nnz(G.m), rows(G.n), cols(G.n) {
// graph is like a triples object
// typedef struct {
// LONG_T m;
// LONG_T n;
// // Arrays of size 'm' storing the edge information
// // A directed edge 'e' (0 <= e < m) from start[e] to end[e]
// // had an integer weight w[e]
// LONG_T* start;
// LONG_T* end;
// WEIGHT_T* w;
// } graph;
cout << "Graph nnz= " << G.m << " and n=" << G.n << endl;
vector<Triple<IT, NT>> simpleG;
vector<pair<pair<IT, IT>, NT>> currCol;
currCol.push_back(make_pair(make_pair(G.start[0], G.end[0]), G.w[0]));
for (IT k = 0; k < nnz - 1; ++k) {
if (G.start[k] != G.start[k + 1]) {
std::sort(currCol.begin(), currCol.end());
simpleG.push_back(Triple<IT, NT>(
currCol[0].first.first, currCol[0].first.second, currCol[0].second));
for (int i = 0; i < currCol.size() - 1; ++i) {
if (currCol[i].first == currCol[i + 1].first) {
simpleG.back().val += currCol[i + 1].second;
} else {
simpleG.push_back(Triple<IT, NT>(currCol[i + 1].first.first,
currCol[i + 1].first.second,
currCol[i + 1].second));
}
}
vector<pair<pair<IT, IT>, NT>>().swap(currCol);
}
currCol.push_back(
make_pair(make_pair(G.start[k + 1], G.end[k + 1]), G.w[k + 1]));
}
// now do the last row
sort(currCol.begin(), currCol.end());
simpleG.push_back(Triple<IT, NT>(currCol[0].first.first,
currCol[0].first.second, currCol[0].second));
for (int i = 0; i < currCol.size() - 1; ++i) {
if (currCol[i].first == currCol[i + 1].first) {
simpleG.back().val += currCol[i + 1].second;
} else {
simpleG.push_back(Triple<IT, NT>(currCol[i + 1].first.first,
currCol[i + 1].first.second,
currCol[i + 1].second));
}
}
nnz = simpleG.size();
cout << "[After duplicate merging] Graph nnz= " << nnz << " and n=" << G.n
<< endl
<< endl;
colptr = my_malloc<IT>(cols + 1);
rowids = my_malloc<IT>(nnz);
values = my_malloc<NT>(nnz);
IT *work = my_malloc<IT>(cols);
  std::fill(work, work + cols, (IT)0); // initialized to zero
for (IT k = 0; k < nnz; ++k) {
IT tmp = simpleG[k].col;
work[tmp]++; // col counts (i.e, w holds the "col difference array")
}
if (nnz > 0) {
colptr[cols] = CumulativeSum(work, cols); // cumulative sum of w
copy(work, work + cols, colptr);
IT last;
for (IT k = 0; k < nnz; ++k) {
rowids[last = work[simpleG[k].col]++] = simpleG[k].row;
values[last] = simpleG[k].val;
}
}
my_free<IT>(work);
}
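// Counting-sort pattern shared by the constructors in this file, judging
// from how CumulativeSum is used here: given per-column counts
// work = {2, 1, 3}, CumulativeSum(work, 3) appears to rewrite work to the
// exclusive prefix sums {0, 2, 3} and return the total 6, so colptr becomes
// {0, 2, 3, 6} and work[col]++ then hands out the next insertion slot for
// each column.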
// Construct a Csc object from an array of "triple"s
template <class IT, class NT>
CSC<IT, NT>::CSC(Triple<IT, NT> *triples, IT mynnz, IT m, IT n)
: nnz(mynnz), rows(m), cols(n) {
colptr = my_malloc<IT>(cols + 1);
rowids = my_malloc<IT>(nnz);
values = my_malloc<NT>(nnz);
vector<pair<IT, NT>> tosort(nnz);
IT *work = my_malloc<IT>(cols);
std::fill(work, work + cols, (IT)0);
for (IT k = 0; k < nnz; ++k) {
IT tmp = triples[k].col;
work[tmp]++; // column counts (i.e, w holds the "col difference array")
}
if (nnz > 0) {
colptr[cols] = CumulativeSum(work, cols); // cumulative sum of w
copy(work, work + cols, colptr);
IT last;
for (IT k = 0; k < nnz; ++k) {
tosort[work[triples[k].col]++] =
make_pair(triples[k].row, triples[k].val);
}
#pragma omp parallel for
for (IT i = 0; i < cols; ++i) {
sort(tosort.begin() + colptr[i], tosort.begin() + colptr[i + 1]);
typename vector<pair<IT, NT>>::iterator
itr; // iterator is a dependent name
IT ind;
for (itr = tosort.begin() + colptr[i], ind = colptr[i];
itr != tosort.begin() + colptr[i + 1]; ++itr, ++ind) {
rowids[ind] = itr->first;
values[ind] = itr->second;
}
}
}
my_free<IT>(work);
}
// Construct a Csc object from an array of pairs
template <class IT, class NT>
CSC<IT,NT>::CSC(std::vector<std::pair<int64_t, int64_t>> edges, IT mynnz, IT m, IT n):nnz(mynnz),rows(m),cols(n)
{
colptr = my_malloc<IT>(cols + 1);
rowids = my_malloc<IT>(nnz);
values = my_malloc<NT>(nnz);
IT *work = my_malloc<IT>(cols);
std::fill(work, work+cols, (IT) 0);
for (IT k = 0 ; k < nnz ; ++k)
{
IT colId = std::get<1>(edges[k]);
work [colId]++ ;
}
if(nnz > 0)
{
colptr[cols] = CumulativeSum (work, cols) ; // cumulative sum of w
copy(work, work+cols, colptr);
for (IT k = 0 ; k < nnz ; ++k)
{
IT colId = std::get<1>(edges[k]);
IT rowId = std::get<0>(edges[k]);
rowids[work[colId]++] = rowId;
}
#pragma omp parallel for
for(IT i=0; i< cols; ++i)
{
sort(rowids + colptr[i], rowids + colptr[i+1]);
}
#pragma omp parallel for
for (IT k = 0 ; k < nnz ; ++k)
{
values[k] = (NT) 1;
}
}
my_free<IT>(work);
}
template <class IT, class NT>
template <typename AddOperation>
void CSC<IT, NT>::MergeDuplicates(AddOperation addop) {
vector<IT> diff(cols, 0);
std::adjacent_difference(colptr + 1, colptr + cols + 1, diff.begin());
  vector<vector<IT>> v_rowids(cols); // one bucket per column
  vector<vector<NT>> v_values(cols);
if (nnz > 0) {
#pragma omp parallel for
for (int i = 0; i < cols; ++i) {
for (size_t j = colptr[i]; j < colptr[i + 1]; ++j) {
v_rowids[i].push_back(rowids[j]);
v_values[i].push_back(values[j]);
while (j < colptr[i + 1] - 1 && rowids[j] == rowids[j + 1]) {
v_values[i].back() = addop(v_values[i].back(), values[j + 1]);
j++; // increment j
diff[i]--;
}
}
}
}
colptr[cols] = CumulativeSum(diff.data(), cols); // cumulative sum of diff
copy(diff.begin(), diff.end(), colptr); // update the column pointers
my_free<IT>(rowids);
my_free<NT>(values);
cout << "Old number of nonzeros before merging: " << nnz << endl;
nnz = colptr[cols];
cout << "New number of nonzeros after merging: " << nnz << endl;
rowids = my_malloc<IT>(nnz);
values = my_malloc<NT>(nnz);
#pragma omp parallel for
for (int i = 0; i < cols; ++i) {
copy(v_rowids[i].begin(), v_rowids[i].end(), rowids + colptr[i]);
copy(v_values[i].begin(), v_values[i].end(), values + colptr[i]);
}
}
//! this version handles duplicates in the input
template <class IT, class NT>
template <typename AddOperation>
// n = kmerdict.size(), m = read_id, nnz = tuple.size()
// CSC<size_t, size_t> *spmat = new CSC<size_t, size_t>(occurrences, read_id,
// kmerdict.size(), plus<size_t>());
CSC<IT, NT>::CSC(vector<tuple<IT, IT, NT>> &tuple, IT m, IT n,
AddOperation addop)
: rows(m), cols(n) {
NT nnz = tuple.size(); // there might be duplicates
colptr = my_malloc<IT>(cols + 1);
rowids = my_malloc<IT>(nnz);
  values = my_malloc<NT>(nnz);
vector<pair<IT, NT>> tosort(nnz);
IT *work = my_malloc<IT>(cols);
  std::fill(work, work + cols, (IT)0); // zero out all column counters
for (IT k = 0; k < nnz; ++k) {
    IT tmp = get<1>(tuple[k]); // tmp = column index (kmer id)
    work[tmp]++; // column counts (i.e., work holds the per-column entry counts)
}
if (nnz > 0) {
    colptr[cols] =
        CumulativeSum(work, cols); // cumulative sum of work; the last
                                   // entry holds the total nnz
copy(work, work + cols, colptr);
for (IT k = 0; k < nnz; ++k) {
tosort[work[get<1>(tuple[k])]++] =
make_pair(get<0>(tuple[k]), get<2>(tuple[k]));
}
#pragma omp parallel for
for (int i = 0; i < cols; ++i) {
sort(tosort.begin() + colptr[i], tosort.begin() + colptr[i + 1]);
typename vector<pair<IT, NT>>::iterator
itr; // iterator is a dependent name
IT ind;
for (itr = tosort.begin() + colptr[i], ind = colptr[i];
itr != tosort.begin() + colptr[i + 1]; ++itr, ++ind) {
rowids[ind] = itr->first;
values[ind] = itr->second;
}
}
}
for (IT j = 0; j < nnz; ++j) {
std::cout << " read_id : " << rowids[j] << " kmer_id : " << get<1>(tuple[j])
<< " pos_in_read : " << values[j] << endl;
// TO DO: as value I want a pair<kmer_id, vector<posix_in_read>>
}
my_free<IT>(work);
}
// Construct a Csc object from parallel arrays
template <class IT, class NT>
CSC<IT, NT>::CSC(IT *ri, IT *ci, NT *val, IT mynnz, IT m, IT n)
: nnz(mynnz), rows(m), cols(n) {
assert(nnz != 0 && rows != 0);
colptr = my_malloc<IT>(cols + 1);
rowids = my_malloc<IT>(nnz);
values = my_malloc<NT>(nnz);
vector<pair<IT, NT>> tosort(nnz);
IT *work = my_malloc<IT>(cols);
std::fill(work, work + cols, (IT)0);
for (IT k = 0; k < nnz; ++k) {
IT tmp = ci[k];
    work[tmp]++; // column counts (i.e., work holds the per-column entry counts)
}
if (nnz > 0) {
    colptr[cols] = CumulativeSum(work, cols); // cumulative sum of work
    copy(work, work + cols, colptr);
for (IT k = 0; k < nnz; ++k) {
tosort[work[ci[k]]++] = make_pair(ri[k], val[k]);
}
#pragma omp parallel for
for (int i = 0; i < cols; ++i) {
sort(tosort.begin() + colptr[i], tosort.begin() + colptr[i + 1]);
typename vector<pair<IT, NT>>::iterator
itr; // iterator is a dependent name
IT ind;
for (itr = tosort.begin() + colptr[i], ind = colptr[i];
itr != tosort.begin() + colptr[i + 1]; ++itr, ++ind) {
rowids[ind] = itr->first;
values[ind] = itr->second;
}
}
}
my_free<IT>(work);
}
// check if sorted within columns
template <class IT, class NT> void CSC<IT, NT>::Sorted() {
bool sorted = true;
for (IT i = 0; i < cols; ++i) {
sorted &= my_is_sorted(rowids + colptr[i], rowids + colptr[i + 1],
std::less<IT>());
}
}
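// randomly permute the (rowid, value) pairs within each column; the fixed
// seed keeps runs reproducible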
template <class IT, class NT> void CSC<IT, NT>::shuffleIds() {
mt19937_64 mt(0);
for (IT i = 0; i < cols; ++i) {
IT offset = colptr[i];
IT width = colptr[i + 1] - colptr[i];
uniform_int_distribution<IT> rand_scale(0, width - 1);
for (IT j = colptr[i]; j < colptr[i + 1]; ++j) {
IT target = rand_scale(mt);
IT tmpId = rowids[offset + target];
NT tmpVal = values[offset + target];
rowids[offset + target] = rowids[j];
values[offset + target] = values[j];
rowids[j] = tmpId;
values[j] = tmpVal;
}
}
}
template <class IT, class NT>
bool CSC<IT, NT>::operator==(const CSC<IT, NT> &rhs) {
if (nnz != rhs.nnz || rows != rhs.rows || cols != rhs.cols)
return false;
bool same = std::equal(colptr, colptr + cols + 1, rhs.colptr);
same = same && std::equal(rowids, rowids + nnz, rhs.rowids);
bool samebefore = same;
ErrorTolerantEqual<NT> epsilonequal(EPSILON);
same = same && std::equal(values, values + nnz, rhs.values, epsilonequal);
if (samebefore && (!same)) {
#ifdef DEBUG
vector<NT> error(nnz);
transform(values, values + nnz, rhs.values, error.begin(), absdiff<NT>());
vector<pair<NT, NT>> error_original_pair(nnz);
for (IT i = 0; i < nnz; ++i)
error_original_pair[i] = make_pair(error[i], values[i]);
    if (error_original_pair.size() > 10) // otherwise would crash for small data
{
partial_sort(error_original_pair.begin(),
error_original_pair.begin() + 10, error_original_pair.end(),
greater<pair<NT, NT>>());
cout << "Highest 10 different entries are: " << endl;
for (IT i = 0; i < 10; ++i)
cout << "Diff: " << error_original_pair[i].first << " on "
<< error_original_pair[i].second << endl;
} else {
sort(error_original_pair.begin(), error_original_pair.end(),
greater<pair<NT, NT>>());
cout << "Highest different entries are: " << endl;
for (typename vector<pair<NT, NT>>::iterator it =
error_original_pair.begin();
it != error_original_pair.end(); ++it)
cout << "Diff: " << it->first << " on " << it->second << endl;
}
#endif
}
return same;
}
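// linear-merge intersection of the sorted (rowids_in, values_in) list with
// the sorted index list ri; matching pairs are written to the output arrays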
template <class IT, class NT>
void CSC<IT, NT>::intersect(const IT *rowids_in, const NT *values_in,
const IT len_in, const IT *ri, const IT len_ri,
IT *rowids_out, NT *values_out, IT *len_out) {
IT maxlen = len_in > len_ri ? len_in : len_ri;
double r =
len_in > len_ri ? (double)len_in / len_ri : (double)len_ri / len_in;
// if(log2(maxlen) < r) // linear scan is asymptotically better
{
IT idx = 0;
for (int j = 0, k = 0; j < len_in && k < len_ri;) {
if (ri[k] < rowids_in[j])
k++;
else if (ri[k] > rowids_in[j])
j++;
else //(ri[k]==rowids[j])
{
values_out[idx] = values_in[j];
rowids_out[idx++] = rowids_in[j];
k++;
j++; // repeated rows are not allowed
}
}
*len_out = idx;
}
// else // use finger search
{}
}
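// extract the submatrix A(ri, ci): intersect each selected column with the
// sorted row index list ri, then pack the per-column results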
template <class IT, class NT>
CSC<IT, NT> CSC<IT, NT>::SpRef2(const IT *ri, const IT rilen, const IT *ci,
const IT cilen) {
  if (cilen > 0 && ci[cilen - 1] >= cols) {
cerr << "Col indices out of bounds" << endl;
abort();
}
  if (rilen > 0 && ri[rilen - 1] >= rows) {
cerr << "Row indices out of bounds" << endl;
abort();
}
// count nnz(A[,:J])
IT nnz_ci = 0;
for (int i = 0; i < cilen; i++) {
nnz_ci = nnz_ci + colptr[ci[i] + 1] - colptr[ci[i]];
}
// IT* rowids_out = new IT[nnz_ci];
// NT* values_out = new NT[nnz_ci];
// IT* len_out = new IT[cilen];
IT *rowids_out = my_malloc<IT>(nnz_ci);
  NT *values_out = my_malloc<NT>(nnz_ci);
IT *len_out = my_malloc<IT>(cilen);
IT idx = 0;
for (int i = 0; i < cilen; i++) {
IT cidx1 = colptr[ci[i]];
IT cidx2 = colptr[ci[i] + 1];
intersect(&rowids[cidx1], &values[cidx1], cidx2 - cidx1, ri, rilen,
&rowids_out[cidx1], &values_out[cidx1], &len_out[i]);
}
CSC C;
C.rows = rilen;
C.cols = cilen;
// C.colptr = new IT[C.cols+1];
C.colptr = my_malloc<IT>(C.cols + 1);
C.colptr[0] = 0;
for (int i = 0; i < C.cols; ++i) {
C.colptr[i + 1] = C.colptr[i] + len_out[i];
}
C.nnz = C.colptr[C.cols];
// C.rowids = new IT[C.nnz];
// C.values = new NT[C.nnz];
C.rowids = my_malloc<IT>(C.nnz);
C.values = my_malloc<NT>(C.nnz);
for (int i = 0; i < C.cols; ++i) // combine step
{
IT cidx1 = colptr[ci[i]];
IT cidx2 = cidx1 + len_out[i];
copy(&rowids_out[cidx1], &rowids_out[cidx2], C.rowids + C.colptr[i]);
copy(&values_out[cidx1], &values_out[cidx2], C.values + C.colptr[i]);
}
return C;
}
// general-purpose set-intersect based submatrix extraction
// (binary search would be faster if one of the vectors is very large)
// we assume that ri and ci are sorted in ascending order
// and that the matrix is sorted within each column
// output is another CSC
// note that ri and ci might have repeated entries
// behaviour matches the MATLAB implementation
template <class IT, class NT>
CSC<IT, NT> CSC<IT, NT>::SpRef(const vector<IT> &ri, const vector<IT> &ci) {
  if ((!ci.empty()) && (ci.back() >= cols)) {
cerr << "Col indices out of bounds" << endl;
abort();
}
  if ((!ri.empty()) && (ri.back() >= rows)) {
cerr << "Row indices out of bounds" << endl;
abort();
}
// first, count nnz in the result matrix
IT refnnz = 0;
for (int i = 0; i < ci.size(); i++) {
IT j = colptr[ci[i]], k = 0;
IT endIdx = colptr[ci[i] + 1];
while (j < endIdx && k < ri.size()) {
// cout << j << "=" << rowids[j] << " :: " << k << "=" << ri[k] << " \n";
if (ri[k] < rowids[j])
k++;
else if (ri[k] > rowids[j])
j++;
else //(ri[k]==rowids[j])
{
refnnz++;
k++;
        // j++; // wait for the next iteration of the inner loop to allow
        // repeated rows
}
}
}
// Next, allocate memory and save the result matrix
// This two-step implementation is better for multithreading
CSC refmat(refnnz, ri.size(), ci.size(), 0);
refmat.colptr[0] = 0;
IT idx = 0;
for (int i = 0; i < ci.size(); i++) {
IT j = colptr[ci[i]], k = 0;
IT endIdx = colptr[ci[i] + 1];
while (j < endIdx && k < ri.size()) {
if (ri[k] < rowids[j])
k++;
else if (ri[k] > rowids[j])
j++;
else //(ri[k]==rowids[j])
{
refmat.values[idx] = values[j];
refmat.rowids[idx++] = rowids[j];
k++;
        // j++; // wait for the next iteration of the inner loop to allow
        // repeated rows
}
}
refmat.colptr[i + 1] = idx;
}
return refmat;
}
// general-purpose set-intersect based submatrix extraction
// (binary search would be faster if one of the vectors is very large)
// we assume that ri and ci are sorted in ascending order
// and that the matrix is sorted within each column
// output is another CSC
// note that ri and ci might have repeated entries
// behaviour matches the MATLAB implementation
template <class IT, class NT>
CSC<IT, NT> CSC<IT, NT>::SpRef1(const vector<IT> &ri, const vector<IT> &ci) {
  if ((!ci.empty()) && (ci.back() >= cols)) {
cerr << "Col indices out of bounds" << endl;
abort();
}
  if ((!ri.empty()) && (ri.back() >= rows)) {
cerr << "Row indices out of bounds" << endl;
abort();
}
  BitMap bmap(rows); // space requirement: one bit per row of the matrix
  bmap.reset();      // this is time consuming
for (int i = 0; i < ri.size(); i++) {
bmap.set_bit(ri[i]);
}
// first, count nnz in the result matrix
IT refnnz = 0;
for (int i = 0; i < ci.size(); i++) {
IT endIdx = colptr[ci[i] + 1];
for (IT j = colptr[ci[i]]; j < endIdx; j++) {
if (bmap.get_bit(rowids[j]))
refnnz++;
}
}
// Next, allocate memory and save the result matrix
// This two-step implementation is better for multithreading
CSC refmat(refnnz, ri.size(), ci.size(), 0);
refmat.colptr[0] = 0;
IT idx = 0;
for (int i = 0; i < ci.size(); i++) {
IT endIdx = colptr[ci[i] + 1];
for (IT j = colptr[ci[i]]; j < endIdx; j++) {
if (bmap.get_bit(rowids[j])) {
refmat.values[idx] = values[j];
refmat.rowids[idx++] = rowids[j];
}
}
refmat.colptr[i + 1] = idx;
}
return refmat;
}
#endif
|
dynamic_module.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-aarch64-unknown-linux-gnu %t.so && %libomptarget-run-aarch64-unknown-linux-gnu 2>&1 | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-powerpc64-ibm-linux-gnu %t.so && %libomptarget-run-powerpc64-ibm-linux-gnu 2>&1 | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-powerpc64le-ibm-linux-gnu %t.so && %libomptarget-run-powerpc64le-ibm-linux-gnu 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-x86_64-pc-linux-gnu %t.so && %libomptarget-run-x86_64-pc-linux-gnu 2>&1 | %fcheck-x86_64-pc-linux-gnu
#ifdef SHARED
void foo() {}
#else
#include <stdio.h>
int main() {
#pragma omp target
;
// CHECK: DONE.
printf("%s\n", "DONE.");
return 0;
}
#endif
|
convolution_pack1to8_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convolution_pack1to8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
const signed char* kptr = weight_data_int8.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
__m128i _val = _mm_set1_epi16((short)sptr[space_ofs[k]]);
// TODO use _mm_cvtepi8_epi16 on sse4.1
__m128i _w = _mm_loadl_epi64((const __m128i*)kptr);
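                    // _mm_cmpgt_epi8(0, _w) is 0xFF exactly for the negative bytes,
                    // so the unpack below sign-extends the int8 weights to int16;
                    // the mullo/mulhi pair then yields the full 32-bit products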
_w = _mm_unpacklo_epi8(_w, _mm_cmpgt_epi8(_mm_setzero_si128(), _w));
__m128i _sl = _mm_mullo_epi16(_val, _w);
__m128i _sh = _mm_mulhi_epi16(_val, _w);
__m128i _s0 = _mm_unpacklo_epi16(_sl, _sh);
__m128i _s1 = _mm_unpackhi_epi16(_sl, _sh);
_sum0 = _mm_add_epi32(_sum0, _s0);
_sum1 = _mm_add_epi32(_sum1, _s1);
kptr += 8;
}
}
_mm_storeu_si128((__m128i*)(outptr + j * 8), _sum0);
_mm_storeu_si128((__m128i*)(outptr + j * 8 + 4), _sum1);
}
outptr += outw * 8;
}
}
}
|
convolution_3x3_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k10 = vld1q_f32(k0+12);
float32x4_t _k11 = vld1q_f32(k0+16);
float32x4_t _k12 = vld1q_f32(k0+20);
float32x4_t _k20 = vld1q_f32(k0+24);
float32x4_t _k21 = vld1q_f32(k0+28);
float32x4_t _k22 = vld1q_f32(k0+32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
for (; j+7<outw; j+=8)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"fmla v28.4s, %8.4s, v1.s[0] \n"
"fmla v29.4s, %8.4s, v1.s[1] \n"
"fmla v30.4s, %8.4s, v1.s[2] \n"
"fmla v31.4s, %8.4s, v1.s[3] \n"
"ld1 {v2.2s}, [%1] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"fmla v28.4s, %9.4s, v1.s[1] \n"
"fmla v29.4s, %9.4s, v1.s[2] \n"
"fmla v30.4s, %9.4s, v1.s[3] \n"
"fmla v31.4s, %9.4s, v2.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4s, v5.4s}, [%2], #32 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"fmla v28.4s, %10.4s, v1.s[2] \n"
"fmla v29.4s, %10.4s, v1.s[3] \n"
"fmla v30.4s, %10.4s, v2.s[0] \n"
"fmla v31.4s, %10.4s, v2.s[1] \n"
"ld1 {v2.2s}, [%2] \n"
"fmla v24.4s, %11.4s, v4.s[0] \n"
"fmla v25.4s, %11.4s, v4.s[1] \n"
"fmla v26.4s, %11.4s, v4.s[2] \n"
"fmla v27.4s, %11.4s, v4.s[3] \n"
"fmla v28.4s, %11.4s, v5.s[0] \n"
"fmla v29.4s, %11.4s, v5.s[1] \n"
"fmla v30.4s, %11.4s, v5.s[2] \n"
"fmla v31.4s, %11.4s, v5.s[3] \n"
"fmla v24.4s, %12.4s, v4.s[1] \n"
"fmla v25.4s, %12.4s, v4.s[2] \n"
"fmla v26.4s, %12.4s, v4.s[3] \n"
"fmla v27.4s, %12.4s, v5.s[0] \n"
"fmla v28.4s, %12.4s, v5.s[1] \n"
"fmla v29.4s, %12.4s, v5.s[2] \n"
"fmla v30.4s, %12.4s, v5.s[3] \n"
"fmla v31.4s, %12.4s, v2.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"
"fmla v24.4s, %13.4s, v4.s[2] \n"
"fmla v25.4s, %13.4s, v4.s[3] \n"
"fmla v26.4s, %13.4s, v5.s[0] \n"
"fmla v27.4s, %13.4s, v5.s[1] \n"
"fmla v28.4s, %13.4s, v5.s[2] \n"
"fmla v29.4s, %13.4s, v5.s[3] \n"
"fmla v30.4s, %13.4s, v2.s[0] \n"
"fmla v31.4s, %13.4s, v2.s[1] \n"
"ld1 {v2.2s}, [%3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v28.4s, %14.4s, v1.s[0] \n"
"fmla v29.4s, %14.4s, v1.s[1] \n"
"fmla v30.4s, %14.4s, v1.s[2] \n"
"fmla v31.4s, %14.4s, v1.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v28.4s, %15.4s, v1.s[1] \n"
"fmla v29.4s, %15.4s, v1.s[2] \n"
"fmla v30.4s, %15.4s, v1.s[3] \n"
"fmla v31.4s, %15.4s, v2.s[0] \n"
"sub %0, %0, #64 \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"fmla v28.4s, %16.4s, v1.s[2] \n"
"fmla v29.4s, %16.4s, v1.s[3] \n"
"fmla v30.4s, %16.4s, v2.s[0] \n"
"fmla v31.4s, %16.4s, v2.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
#endif // __aarch64__
for (; j+3<outw; j+=4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1], #16 \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"ld1 {v1.2s}, [%1] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.4s}, [%2], #16 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"ld1 {v3.2s}, [%2] \n"
"fmla v24.4s, %11.4s, v2.s[0] \n"
"fmla v25.4s, %11.4s, v2.s[1] \n"
"fmla v26.4s, %11.4s, v2.s[2] \n"
"fmla v27.4s, %11.4s, v2.s[3] \n"
"fmla v24.4s, %12.4s, v2.s[1] \n"
"fmla v25.4s, %12.4s, v2.s[2] \n"
"fmla v26.4s, %12.4s, v2.s[3] \n"
"fmla v27.4s, %12.4s, v3.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n"
"fmla v24.4s, %13.4s, v2.s[2] \n"
"fmla v25.4s, %13.4s, v2.s[3] \n"
"fmla v26.4s, %13.4s, v3.s[0] \n"
"fmla v27.4s, %13.4s, v3.s[1] \n"
"ld1 {v1.2s}, [%3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1]! \n"
"vmla.f32 q12, %q8, d0[0] \n"
"vmla.f32 q13, %q8, d0[1] \n"
"vmla.f32 q14, %q8, d1[0] \n"
"vmla.f32 q15, %q8, d1[1] \n"
"vld1.f32 {d2}, [%1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"vmla.f32 q14, %q9, d1[1] \n"
"vmla.f32 q15, %q9, d2[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d4-d5}, [%2]! \n"
"vmla.f32 q12, %q10, d1[0] \n"
"vmla.f32 q13, %q10, d1[1] \n"
"vmla.f32 q14, %q10, d2[0] \n"
"vmla.f32 q15, %q10, d2[1] \n"
"vmla.f32 q12, %q11, d4[0] \n"
"vmla.f32 q13, %q11, d4[1] \n"
"vmla.f32 q14, %q11, d5[0] \n"
"vmla.f32 q15, %q11, d5[1] \n"
"vld1.f32 {d3}, [%2] \n"
"vmla.f32 q12, %q12, d4[1] \n"
"vmla.f32 q13, %q12, d5[0] \n"
"vmla.f32 q14, %q12, d5[1] \n"
"vmla.f32 q15, %q12, d3[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3]! \n"
"vmla.f32 q12, %q13, d5[0] \n"
"vmla.f32 q13, %q13, d5[1] \n"
"vmla.f32 q14, %q13, d3[0] \n"
"vmla.f32 q15, %q13, d3[1] \n"
"vmla.f32 q12, %q14, d0[0] \n"
"vmla.f32 q13, %q14, d0[1] \n"
"vmla.f32 q14, %q14, d1[0] \n"
"vmla.f32 q15, %q14, d1[1] \n"
"vld1.f32 {d2}, [%3] \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"vmla.f32 q14, %q15, d1[1] \n"
"vmla.f32 q15, %q15, d2[0] \n"
"vmla.f32 q12, %q16, d1[0] \n"
"vmla.f32 q13, %q16, d1[1] \n"
"vmla.f32 q14, %q16, d2[0] \n"
"vmla.f32 q15, %q16, d2[1] \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j+1<outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v24.4s, v25.4s}, [%0] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"fmul v26.4s, %8.4s, v0.s[0] \n"
"fmul v27.4s, %8.4s, v0.s[1] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v1.4s}, [%2] \n"
"fmla v26.4s, %10.4s, v0.s[2] \n"
"fmla v27.4s, %10.4s, v0.s[3] \n"
"fmla v24.4s, %11.4s, v1.s[0] \n"
"fmla v25.4s, %11.4s, v1.s[1] \n"
"add %1, %1, #8 \n"
"fmla v26.4s, %12.4s, v1.s[1] \n"
"fmla v27.4s, %12.4s, v1.s[2] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3] \n"
"fmla v24.4s, %13.4s, v1.s[2] \n"
"fmla v25.4s, %13.4s, v1.s[3] \n"
"fmla v26.4s, %14.4s, v0.s[0] \n"
"fmla v27.4s, %14.4s, v0.s[1] \n"
"add %2, %2, #8 \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %16.4s, v0.s[2] \n"
"fmla v27.4s, %16.4s, v0.s[3] \n"
"add %3, %3, #8 \n"
"fadd v24.4s, v24.4s, v26.4s \n"
"fadd v25.4s, v25.4s, v27.4s \n"
"st1 {v24.4s, v25.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmul.f32 q14, %q8, d0[0] \n"
"vmul.f32 q15, %q8, d0[1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d2-d3}, [%2] \n"
"vmla.f32 q14, %q10, d1[0] \n"
"vmla.f32 q15, %q10, d1[1] \n"
"vmla.f32 q12, %q11, d2[0] \n"
"vmla.f32 q13, %q11, d2[1] \n"
"add %1, %1, #8 \n"
"vmla.f32 q14, %q12, d2[1] \n"
"vmla.f32 q15, %q12, d3[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3] \n"
"vmla.f32 q12, %q13, d3[0] \n"
"vmla.f32 q13, %q13, d3[1] \n"
"vmla.f32 q14, %q14, d0[0] \n"
"vmla.f32 q15, %q14, d0[1] \n"
"add %2, %2, #8 \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"vmla.f32 q14, %q16, d1[0] \n"
"vmla.f32 q15, %q16, d1[1] \n"
"add %3, %3, #8 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j<outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9*4;
}
}
}
static void conv3x3s2_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
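    // stride is 2: after producing outw outputs we have consumed 2*outw input
    // pixels, so tailstep jumps to the start of the input row two rows below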
const int tailstep = w - 2*outw + w;
const float* bias = _bias;
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p+1) * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
out1.fill(_bias1);
const float* k0 = kernel.channel(p);
const float* k1 = kernel.channel(p+1);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00_0 = vld1q_f32(k0);
float32x4_t _k01_0 = vld1q_f32(k0+4);
float32x4_t _k02_0 = vld1q_f32(k0+8);
float32x4_t _k10_0 = vld1q_f32(k0+12);
float32x4_t _k11_0 = vld1q_f32(k0+16);
float32x4_t _k12_0 = vld1q_f32(k0+20);
float32x4_t _k20_0 = vld1q_f32(k0+24);
float32x4_t _k21_0 = vld1q_f32(k0+28);
float32x4_t _k22_0 = vld1q_f32(k0+32);
float32x4_t _k00_1 = vld1q_f32(k1);
float32x4_t _k01_1 = vld1q_f32(k1+4);
float32x4_t _k02_1 = vld1q_f32(k1+8);
float32x4_t _k10_1 = vld1q_f32(k1+12);
float32x4_t _k11_1 = vld1q_f32(k1+16);
float32x4_t _k12_1 = vld1q_f32(k1+20);
float32x4_t _k20_1 = vld1q_f32(k1+24);
float32x4_t _k21_1 = vld1q_f32(k1+28);
float32x4_t _k22_1 = vld1q_f32(k1+32);
int i = 0;
for (; i < outh; i++)
{
int nn = outw >> 2;
int remain = outw & 3;
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n"// sum0
// r0
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"
"ld1r {v4.4s}, [%3] \n"
"fmla v6.4s, %12.4s, v0.s[0] \n"
"fmla v7.4s, %12.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2] \n"// sum1
"fmla v8.4s, %12.4s, v1.s[0] \n"
"fmla v9.4s, %12.4s, v1.s[2] \n"
"fmla v10.4s, %21.4s, v0.s[0] \n"
"fmla v11.4s, %21.4s, v0.s[2] \n"
"fmla v12.4s, %21.4s, v1.s[0] \n"
"fmla v13.4s, %21.4s, v1.s[2] \n"
"fmla v6.4s, %13.4s, v0.s[1] \n"
"fmla v7.4s, %13.4s, v0.s[3] \n"
"fmla v8.4s, %13.4s, v1.s[1] \n"
"fmla v9.4s, %13.4s, v1.s[3] \n"
"fmla v10.4s, %22.4s, v0.s[1] \n"
"fmla v11.4s, %22.4s, v0.s[3] \n"
"fmla v12.4s, %22.4s, v1.s[1] \n"
"fmla v13.4s, %22.4s, v1.s[3] \n"
// r1
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v2.4s, v3.4s}, [%4], #32 \n"
"ld1r {v5.4s}, [%4] \n"
"fmla v6.4s, %14.4s, v0.s[2] \n"
"fmla v7.4s, %14.4s, v1.s[0] \n"
"fmla v8.4s, %14.4s, v1.s[2] \n"
"fmla v9.4s, %14.4s, v4.s[0] \n"
"fmla v10.4s, %23.4s, v0.s[2] \n"
"fmla v11.4s, %23.4s, v1.s[0] \n"
"fmla v12.4s, %23.4s, v1.s[2] \n"
"fmla v13.4s, %23.4s, v4.s[0] \n"
"fmla v6.4s, %15.4s, v2.s[0] \n"
"fmla v7.4s, %15.4s, v2.s[2] \n"
"fmla v8.4s, %15.4s, v3.s[0] \n"
"fmla v9.4s, %15.4s, v3.s[2] \n"
"fmla v10.4s, %24.4s, v2.s[0] \n"
"fmla v11.4s, %24.4s, v2.s[2] \n"
"fmla v12.4s, %24.4s, v3.s[0] \n"
"fmla v13.4s, %24.4s, v3.s[2] \n"
"fmla v6.4s, %16.4s, v2.s[1] \n"
"fmla v7.4s, %16.4s, v2.s[3] \n"
"fmla v8.4s, %16.4s, v3.s[1] \n"
"fmla v9.4s, %16.4s, v3.s[3] \n"
"fmla v10.4s, %25.4s, v2.s[1] \n"
"fmla v11.4s, %25.4s, v2.s[3] \n"
"fmla v12.4s, %25.4s, v3.s[1] \n"
"fmla v13.4s, %25.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5], #32 \n"
"ld1r {v4.4s}, [%5] \n"
"fmla v6.4s, %17.4s, v2.s[2] \n"
"fmla v7.4s, %17.4s, v3.s[0] \n"
"fmla v8.4s, %17.4s, v3.s[2] \n"
"fmla v9.4s, %17.4s, v5.s[0] \n"
"fmla v10.4s, %26.4s, v2.s[2] \n"
"fmla v11.4s, %26.4s, v3.s[0] \n"
"fmla v12.4s, %26.4s, v3.s[2] \n"
"fmla v13.4s, %26.4s, v5.s[0] \n"
"fmla v6.4s, %18.4s, v0.s[0] \n"
"fmla v7.4s, %18.4s, v0.s[2] \n"
"fmla v8.4s, %18.4s, v1.s[0] \n"
"fmla v9.4s, %18.4s, v1.s[2] \n"
"fmla v10.4s, %27.4s, v0.s[0] \n"
"fmla v11.4s, %27.4s, v0.s[2] \n"
"fmla v12.4s, %27.4s, v1.s[0] \n"
"fmla v13.4s, %27.4s, v1.s[2] \n"
"fmla v6.4s, %19.4s, v0.s[1] \n"
"fmla v7.4s, %19.4s, v0.s[3] \n"
"fmla v8.4s, %19.4s, v1.s[1] \n"
"fmla v9.4s, %19.4s, v1.s[3] \n"
"fmla v10.4s, %28.4s, v0.s[1] \n"
"fmla v11.4s, %28.4s, v0.s[3] \n"
"fmla v12.4s, %28.4s, v1.s[1] \n"
"fmla v13.4s, %28.4s, v1.s[3] \n"
"fmla v6.4s, %20.4s, v0.s[2] \n"
"fmla v7.4s, %20.4s, v1.s[0] \n"
"fmla v8.4s, %20.4s, v1.s[2] \n"
"fmla v9.4s, %20.4s, v4.s[0] \n"
"fmla v10.4s, %29.4s, v0.s[2] \n"
"fmla v11.4s, %29.4s, v1.s[0] \n"
"fmla v12.4s, %29.4s, v1.s[2] \n"
"fmla v13.4s, %29.4s, v4.s[0] \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n"
"st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"
);
}
for (; remain>0; remain--)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _sum1 = vld1q_f32(outptr1);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
_sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr1, _sum1);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
outptr1 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9*4;
k1 += 9*4;
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k10 = vld1q_f32(k0+12);
float32x4_t _k11 = vld1q_f32(k0+16);
float32x4_t _k12 = vld1q_f32(k0+20);
float32x4_t _k20 = vld1q_f32(k0+24);
float32x4_t _k21 = vld1q_f32(k0+28);
float32x4_t _k22 = vld1q_f32(k0+32);
int i = 0;
for (; i < outh; i++)
{
int nn = outw >> 2;
int remain = outw & 3;
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n"// sum0
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n"
"ld1r {v4.4s}, [%2] \n"
"fmla v6.4s, %10.4s, v0.s[0] \n"
"fmla v7.4s, %10.4s, v0.s[2] \n"
"fmla v8.4s, %10.4s, v1.s[0] \n"
"fmla v9.4s, %10.4s, v1.s[2] \n"
"fmla v6.4s, %11.4s, v0.s[1] \n"
"fmla v7.4s, %11.4s, v0.s[3] \n"
"fmla v8.4s, %11.4s, v1.s[1] \n"
"fmla v9.4s, %11.4s, v1.s[3] \n"
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v2.4s, v3.4s}, [%3], #32 \n"
"ld1r {v5.4s}, [%3] \n"
"fmla v6.4s, %12.4s, v0.s[2] \n"
"fmla v7.4s, %12.4s, v1.s[0] \n"
"fmla v8.4s, %12.4s, v1.s[2] \n"
"fmla v9.4s, %12.4s, v4.s[0] \n"
"fmla v6.4s, %13.4s, v2.s[0] \n"
"fmla v7.4s, %13.4s, v2.s[2] \n"
"fmla v8.4s, %13.4s, v3.s[0] \n"
"fmla v9.4s, %13.4s, v3.s[2] \n"
"fmla v6.4s, %14.4s, v2.s[1] \n"
"fmla v7.4s, %14.4s, v2.s[3] \n"
"fmla v8.4s, %14.4s, v3.s[1] \n"
"fmla v9.4s, %14.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4s, v1.4s}, [%4], #32 \n"
"ld1r {v4.4s}, [%4] \n"
"fmla v6.4s, %15.4s, v2.s[2] \n"
"fmla v7.4s, %15.4s, v3.s[0] \n"
"fmla v8.4s, %15.4s, v3.s[2] \n"
"fmla v9.4s, %15.4s, v5.s[0] \n"
"fmla v6.4s, %16.4s, v0.s[0] \n"
"fmla v7.4s, %16.4s, v0.s[2] \n"
"fmla v8.4s, %16.4s, v1.s[0] \n"
"fmla v9.4s, %16.4s, v1.s[2] \n"
"fmla v6.4s, %17.4s, v0.s[1] \n"
"fmla v7.4s, %17.4s, v0.s[3] \n"
"fmla v8.4s, %17.4s, v1.s[1] \n"
"fmla v9.4s, %17.4s, v1.s[3] \n"
"fmla v6.4s, %18.4s, v0.s[2] \n"
"fmla v7.4s, %18.4s, v1.s[0] \n"
"fmla v8.4s, %18.4s, v1.s[2] \n"
"fmla v9.4s, %18.4s, v4.s[0] \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"
);
}
#else // __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #512] \n"
"vldm %1, {d0-d7} \n"// sum0
// r0
"pld [%2, #256] \n"
"vld1.f32 {d8-d11}, [%2]! \n"
"vld1.f32 {d12[]}, [%2] \n"
"vmla.f32 q0, %q10, d8[0] \n"
"vmla.f32 q1, %q10, d9[0] \n"
"vmla.f32 q2, %q10, d10[0] \n"
"vmla.f32 q3, %q10, d11[0] \n"
"vmla.f32 q0, %q11, d8[1] \n"
"vmla.f32 q1, %q11, d9[1] \n"
"vmla.f32 q2, %q11, d10[1] \n"
"vmla.f32 q3, %q11, d11[1] \n"
"vmla.f32 q0, %q12, d9[0] \n"
"vmla.f32 q1, %q12, d10[0] \n"
"vmla.f32 q2, %q12, d11[0] \n"
// r1
"pld [%3, #256] \n"
"vld1.f32 {d8-d11}, [%3]! \n"
"vld1.f32 {d13[]}, [%3] \n"
"vmla.f32 q3, %q12, d12[0] \n"
"vmla.f32 q0, %q13, d8[0] \n"
"vmla.f32 q1, %q13, d9[0] \n"
"vmla.f32 q2, %q13, d10[0] \n"
"vmla.f32 q3, %q13, d11[0] \n"
"vmla.f32 q0, %q14, d8[1] \n"
"vmla.f32 q1, %q14, d9[1] \n"
"vmla.f32 q2, %q14, d10[1] \n"
"vmla.f32 q3, %q14, d11[1] \n"
"vmla.f32 q0, %q15, d9[0] \n"
"vmla.f32 q1, %q15, d10[0] \n"
"vmla.f32 q2, %q15, d11[0] \n"
// r2
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4]! \n"
"vld1.f32 {d12[]}, [%4] \n"
"vmla.f32 q3, %q15, d13[0] \n"
"vmla.f32 q0, %q16, d8[0] \n"
"vmla.f32 q1, %q16, d9[0] \n"
"vmla.f32 q2, %q16, d10[0] \n"
"vmla.f32 q3, %q16, d11[0] \n"
"vmla.f32 q0, %q17, d8[1] \n"
"vmla.f32 q1, %q17, d9[1] \n"
"vmla.f32 q2, %q17, d10[1] \n"
"vmla.f32 q3, %q17, d11[1] \n"
"vmla.f32 q0, %q18, d9[0] \n"
"vmla.f32 q1, %q18, d10[0] \n"
"vmla.f32 q2, %q18, d11[0] \n"
"vmla.f32 q3, %q18, d12[0] \n"
"subs %0, %0, #1 \n"
"vstm %1!, {d0-d7} \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"
);
}
#endif // __aarch64__
for (; remain>0; remain--)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9*4;
}
}
}
|
GB_unop__frexpe_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__frexpe_fp32_fp32)
// op(A') function: GB (_unop_tran__frexpe_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = GB_frexpef (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_frexpef (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = GB_frexpef (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FREXPE || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__frexpe_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = GB_frexpef (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = GB_frexpef (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__frexpe_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pt_explicit.h | #ifndef PT_EXPLICIT_H
#define PT_EXPLICIT_H
#include <chrono>
#include "../integrator.h"
#include "../timer.h"
class PtExplicit : public Integrator {
public:
int N;
PtExplicit(const std::shared_ptr<Camera>& _camera, const std::shared_ptr<Sampler>& _sampler, int _N) : Integrator(_camera, _sampler), N(_N) {};
RGB sampleLight(const Ray& ray, Scene& scene, Hit& res, const Vec3& wo_local, const Vec3& n, const Vec3& s, const Vec3& t, const std::shared_ptr<Light>& light) const {
auto hitMaterial = res.hitPrimitive->material;
//Light Sampling
Vec3 col_light;
float light_pdf = 0;
Vec3 wi_light;
Vec3 samplePos;
RGB le = light->sample(res, *this->sampler, wi_light, samplePos, light_pdf);
if(isZero(le) || light_pdf == 0) return RGB(0);
Vec3 wi_light_local = worldToLocal(wi_light, n, s, t);
//BRDF
RGB k = hitMaterial->f(res, wo_local, wi_light_local) * std::max(cosTheta(wi_light_local), 0.0f);
float brdf_pdf = hitMaterial->Pdf(wo_local, wi_light_local);
if(isZero(k)) return RGB(0);
//MIS Weight
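    //power heuristic with beta = 2 (squared pdfs) balances light and BRDF sampling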
float weight = std::pow(light_pdf, 2.0f)/(std::pow(light_pdf, 2.0f) + std::pow(brdf_pdf, 2.0f));
if(std::isinf(weight) || std::isnan(weight)) return RGB(0);
//Visibility Test
Ray shadowRay(res.hitPos, wi_light);
Hit shadow_res;
if(light->type == LIGHT_TYPE::AREA) {
if(scene.intersect(shadowRay, shadow_res)) {
if(shadow_res.hitPrimitive->light == light && (samplePos - shadow_res.hitPos).length2() < 1e-6) {
col_light += weight * k * le/light_pdf;
}
}
}
else if(light->type == LIGHT_TYPE::SKY) {
if(!scene.intersect(shadowRay, shadow_res)) {
col_light += weight * k * le/light_pdf;
}
}
//Handle Delta Light Case
else if(light->type == LIGHT_TYPE::POINT) {
scene.intersect(shadowRay, shadow_res);
if(shadow_res.t >= (samplePos - shadowRay.origin).length()) {
col_light += k * le/light_pdf;
}
}
return col_light;
};
RGB sampleBRDF(const Ray& ray, Scene& scene, Hit& res, const Vec3& wo_local, const Vec3& n, const Vec3& s, const Vec3& t, const std::shared_ptr<Light>& light) const {
Vec3 col_brdf;
auto hitMaterial = res.hitPrimitive->material;
//BRDF Sampling
Vec3 wi_local;
float brdf_pdf;
RGB brdf = hitMaterial->sample(res, wo_local, *this->sampler, wi_local, brdf_pdf);
if(isZero(brdf) || brdf_pdf == 0) return RGB(0);
float cos = absCosTheta(wi_local);
if(cos == 0) return RGB(0);
Vec3 wi = localToWorld(wi_local, n, s, t);
RGB k = brdf * cos/brdf_pdf;
//Visibility Test
Ray shadowRay(res.hitPos, wi);
Hit shadow_res;
if(light->type != LIGHT_TYPE::SKY) {
if(scene.intersect(shadowRay, shadow_res)) {
if(shadow_res.hitPrimitive->light != nullptr) {
//Light
RGB le = light->Le(shadow_res, shadowRay);
float light_pdf = light->Pdf(res, wi, shadow_res);
//MIS Weight
float weight = std::pow(brdf_pdf, 2.0f)/(std::pow(brdf_pdf, 2.0f) + std::pow(light_pdf, 2.0f));
if(std::isinf(weight) || std::isnan(weight)) return RGB(0);
if(hitMaterial->type != MATERIAL_TYPE::SPECULAR) {
col_brdf += weight * k * le;
}
//Handle Specular Case
else {
col_brdf += k * le;
}
}
}
}
else {
if(!scene.intersect(shadowRay , shadow_res)) {
//Light
RGB le = light->Le(shadow_res, shadowRay);
float light_pdf = light->Pdf(res, wi, shadow_res);
//MIS Weight
float weight = std::pow(brdf_pdf, 2.0f)/(std::pow(brdf_pdf, 2.0f) + std::pow(light_pdf, 2.0f));
if(std::isinf(weight) || std::isnan(weight)) return RGB(0);
if(hitMaterial->type != MATERIAL_TYPE::SPECULAR) {
col_brdf += weight * k * le;
}
//Handle Specular Case
else {
col_brdf += k * le;
}
}
}
return col_brdf;
};
RGB Li(const Ray& _ray, Scene& scene) const {
float russian_roulette = 1.0f;
Vec3 col;
Vec3 col2(1);
Ray ray = _ray;
for(int depth = 0; ; depth++) {
if(isZero(col2)) break;
Hit res;
if(scene.intersect(ray, res)) {
if(res.hitPrimitive->light != nullptr) {
if(depth == 0) {
return res.hitPrimitive->light->Le(res, ray);
}
else {
break;
}
}
auto hitMaterial = res.hitPrimitive->material;
//Local Coordinate
Vec3 wo = -ray.direction;
Vec3 n = res.hitNormal;
Vec3 s, t;
orthonormalBasis(n, s, t);
Vec3 wo_local = worldToLocal(wo, n, s, t);
//Direct Illumination
//Light Sampling
unsigned int light_index = (int)(scene.lights.size() * (*this->sampler).getNext());
if(light_index == scene.lights.size()) light_index--;
const auto light = scene.lights[light_index];
//Calc Direct Illumination
Vec3 direct_col = scene.lights.size() * (sampleLight(ray, scene, res, wo_local, n, s, t, light) + sampleBRDF(ray, scene, res, wo_local, n, s, t, light));
//if Direct Illumination is inf or nan
if(isNan(direct_col) || isInf(direct_col)) {
std::cerr << "NaN or Inf detected at Direct Illumination" << std::endl;
break;
}
//Sample BRDF for next ray direction
Vec3 wi_local;
float brdf_pdf;
RGB brdf = hitMaterial->sample(res, wo_local, *this->sampler, wi_local, brdf_pdf);
float cos = absCosTheta(wi_local);
if(isZero(brdf) || brdf_pdf == 0 || cos == 0) break;
RGB k = brdf * cos / brdf_pdf;
if(isNan(k) || isInf(k)) {
std::cerr << "NaN or Inf detected at BRDF Sampling" << std::endl;
std::cerr << "BRDF PDF: " << brdf_pdf << std::endl;
std::cerr << "cos: " << cos << std::endl;
break;
}
Vec3 wi = localToWorld(wi_local, n, s, t);
//next ray
ray = Ray(res.hitPos, wi);
col += direct_col * col2;
col2 *= k;
//russian roulette
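        //continuation probability from throughput: 0.577 ~ 1/sqrt(3) converts the
        //RGB vector length into an average-channel scale, clamped below at 0.05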
if(depth > 3) {
russian_roulette = std::max(col2.length()*0.577f, 0.05f);
if((*this->sampler).getNext() > russian_roulette) {
break;
}
col2 /= russian_roulette;
}
}
else {
if(depth == 0) {
return scene.sky->Le(res, ray);
}
else {
break;
}
}
}
return col;
};
void render(Scene& scene) const {
const int width = this->camera->film->width;
const int height = this->camera->film->height;
const int N_sqrt = std::sqrt(N);
Timer timer;
int ms = 0;
for(int i = 0; i < width; i++) {
timer.start();
#pragma omp parallel for schedule(dynamic, 1)
for(int j = 0; j < height; j++) {
for(int k = 0; k < N; k++) {
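        //stratified jittered sample position inside the pixel; assumes N is a
        //perfect square so sample k lands in stratum (k%N_sqrt, k/N_sqrt)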
float rx = 2*sampler->getNext() - 1;
float ry = 2*sampler->getNext() - 1;
float sx = float(k%N_sqrt)/N_sqrt + rx/(2.0*N_sqrt) + 1/(2.0*N_sqrt);
float sy = k/N_sqrt * 1/float(N_sqrt) + ry/(2.0*N_sqrt) + 1/(2.0*N_sqrt);
float u = (2.0*(i + sx) - width)/height;
float v = (2.0*(j + sy) - height)/height;
Vec2 uv(u, v);
Ray ray;
float weight;
if(!this->camera->getRay(u, v, *(this->sampler), ray, weight)) {
this->camera->film->addSample(uv, RGB(0, 0, 0));
}
else {
RGB li = weight*this->Li(ray, scene);
this->camera->film->addSample(uv, li);
}
if(omp_get_thread_num() == 0) {
int index = k + N*j + N*height*i;
std::cout << progressbar(index, width*height*N) << " " << percentage(index, width*height*N) << " ETA: " << ms*(width - i)/1e3 << "s" << "\r" << std::flush;
}
}
}
ms = timer.stop();
}
this->camera->film->ppm_output("output.ppm");
};
void render_rtcamp(Scene& scene) const {
const int width = this->camera->film->width;
const int height = this->camera->film->height;
const int N_sqrt = std::sqrt(N);
Timer timer1;
timer1.start();
for(int k = 0; ; k++) {
Timer timer2;
timer2.start();
for(int j = 0; j < height; j++) {
#pragma omp parallel for schedule(dynamic, 1)
for(int i = 0; i < width; i++) {
float rx = 2*sampler->getNext() - 1;
float ry = 2*sampler->getNext() - 1;
float sx = float(k%N_sqrt)/N_sqrt + rx/(2.0*N_sqrt) + 1/(2.0*N_sqrt);
float sy = k/N_sqrt * 1/float(N_sqrt) + ry/(2.0*N_sqrt) + 1/(2.0*N_sqrt);
float u = (2.0*(i + sx) - width)/height;
float v = (2.0*(j + sy) - height)/height;
Vec2 uv(u, v);
Ray ray;
float weight;
if(!this->camera->getRay(u, v, *(this->sampler), ray, weight)) {
this->camera->film->addSample(uv, RGB(0, 0, 0));
}
else {
RGB li = weight*this->Li(ray, scene);
this->camera->film->addSample(uv, li);
}
}
}
int render_time = timer2.stop();
std::cout << timer1.now() << " ms" << std::endl;
if(timer1.now() + render_time >= 55000) {
this->camera->film->png_output("output.png");
std::cout << k + 1 << " samples" << std::endl;
std::cout << timer1.stop() << " ms" << std::endl;
return;
}
}
}
};
#endif
|
declare_target-5.c | /* { dg-do run { target vect_simd_clones } } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
#include <stdlib.h>
#define EPS 0.00001
#define N 10000
#define M 1024
#pragma omp declare target
float Q[N][N];
#pragma omp declare simd uniform(i) linear(k) notinbranch
float Pfun (const int i, const int k)
{
return Q[i][k] * Q[k][i];
}
#pragma omp end declare target
void init ()
{
int i, j;
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
Q[i][j] = 0.001 * i * j;
}
float accum_ref ()
{
int i, k;
float tmp = 0.0;
for (i = 0; i < N; i++)
{
float tmp1 = 0.0;
for (k = 0; k < M; k++)
tmp1 += Pfun(i,k);
tmp += tmp1;
}
return tmp;
}
float accum ()
{
int i, k;
float tmp = 0.0;
#pragma omp target map(tofrom:tmp)
#pragma omp parallel for reduction(+:tmp)
for (i = 0; i < N; i++)
{
float tmp1 = 0.0;
#pragma omp simd reduction(+:tmp1)
for (k = 0; k < M; k++)
tmp1 += Pfun(i,k);
tmp += tmp1;
}
return tmp;
}
void check (float a, float b)
{
float err = (b == 0.0) ? a : (a - b) / b;
if (((err > 0) ? err : -err) > EPS)
abort ();
}
int main ()
{
init ();
#pragma omp target update to(Q)
check (accum (), accum_ref ());
return 0;
}
|
ej7.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
static long num_steps = 100000;
double step;
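/* Approximates pi = integral from 0 to 1 of 4/(1+x*x) dx with the midpoint
   rule, timing the same reduction loop with 2, 4, 6 and 8 threads. */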
int main(){
int i; double x, pi, sum, start;
int thread_counts[] = {2, 4, 6, 8};
step = 1.0/(double) num_steps;
for(int t = 0; t < 4; ++t){
    int numthreads = thread_counts[t];
    sum = 0.0;
    start = omp_get_wtime();
    #pragma omp parallel for private (x,i) shared(step, num_steps) reduction (+:sum) num_threads(numthreads)
    for(i=1;i<=num_steps;++i){
        x = (i-0.5)*step;
        sum = sum + 4.0/(1.0+x*x);
    }
    printf("\n-------------------------------------------\nExecution time with %i threads: %lfs\n-------------------------------------------\n",numthreads,omp_get_wtime()-start);
}
pi = step * sum;
printf("\n%lf\n\n", pi);
return 0;
}
|
GB_unop__ainv_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_int32_int32)
// op(A') function: GB (_unop_tran__ainv_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_int32_int32)
(
int32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = -z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ainv_int32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
blackscholes.c | // Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice
// Hall, John C. Hull,
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif
// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif //ENABLE_TBB
// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#endif
//Precision to use for calculations
#include "fp_class.hpp"
#ifndef fptype
#define fptype Fp16
#endif
#define NUM_RUNS 1
typedef struct OptionData_ {
fptype s; // spot price
fptype strike; // strike price
fptype r; // risk-free interest rate
fptype divq; // dividend rate
fptype v; // volatility
fptype t; // time to maturity or option expiration in years
// (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
char OptionType; // Option type. "P"=PUT, "C"=CALL
fptype divs; // dividend vals (not used in this test)
fptype DGrefval; // DerivaGem Reference Value
} OptionData;
OptionData *data_st;
fptype *prices;
int numOptions;
int * otype;
fptype * sptprice;
fptype * strike;
fptype * rate;
fptype * volatility;
fptype * otime;
int numError = 0;
int nThreads;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286
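// (inv_sqrt_2xPI is 1/sqrt(2*pi) = 0.398942..., the normalizing constant of
// the standard normal density used for xNPrimeofX below)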
fptype CNDF ( fptype InputX )
{
int sign;
fptype OutputX;
fptype xInput;
fptype xNPrimeofX;
fptype expValues;
fptype xK2;
fptype xK2_2, xK2_3;
fptype xK2_4, xK2_5;
fptype xLocal, xLocal_1;
fptype xLocal_2, xLocal_3;
// Check for negative value of InputX
if (InputX < 0.0) {
InputX = -InputX;
sign = 1;
} else
sign = 0;
xInput = InputX;
// Compute NPrimeX term common to both four & six decimal accuracy calcs
expValues = exp(-0.5f * InputX * InputX);
xNPrimeofX = expValues;
xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;
xK2 = 0.2316419 * xInput;
xK2 = 1.0 + xK2;
xK2 = 1.0 / xK2;
xK2_2 = xK2 * xK2;
xK2_3 = xK2_2 * xK2;
xK2_4 = xK2_3 * xK2;
xK2_5 = xK2_4 * xK2;
xLocal_1 = xK2 * 0.319381530;
xLocal_2 = xK2_2 * (-0.356563782);
xLocal_3 = xK2_3 * 1.781477937;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_4 * (-1.821255978);
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_5 * 1.330274429;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_1 = xLocal_2 + xLocal_1;
xLocal = xLocal_1 * xNPrimeofX;
xLocal = 1.0 - xLocal;
OutputX = xLocal;
if (sign) {
OutputX = 1.0 - OutputX;
}
return OutputX;
}
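// Reference sketch (ours, not part of the benchmark): the polynomial above is
// the Abramowitz-Stegun approximation of the standard normal CDF, which C99
// <math.h> can evaluate exactly as 0.5*erfc(-x/sqrt(2)); handy for
// spot-checking CNDF's accuracy. The helper name CNDF_ref is hypothetical.
static double CNDF_ref ( double x )
{
    return 0.5 * erfc(-x * 0.70710678118654752440) ; // 1/sqrt(2)
}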
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
fptype strike, fptype rate, fptype volatility,
fptype time, int otype, float timet )
{
fptype OptionPrice;
// local private working variables for the calculation
fptype xStockPrice;
fptype xStrikePrice;
fptype xRiskFreeRate;
fptype xVolatility;
fptype xTime;
fptype xSqrtTime;
fptype logValues;
fptype xLogTerm;
fptype xD1;
fptype xD2;
fptype xPowerTerm;
fptype xDen;
fptype d1;
fptype d2;
fptype FutureValueX;
fptype NofXd1;
fptype NofXd2;
fptype NegNofXd1;
fptype NegNofXd2;
xStockPrice = sptprice;
xStrikePrice = strike;
xRiskFreeRate = rate;
xVolatility = volatility;
xTime = time;
xSqrtTime = sqrt(xTime);
logValues = log( sptprice / strike );
xLogTerm = logValues;
xPowerTerm = xVolatility * xVolatility;
xPowerTerm = xPowerTerm * 0.5;
xD1 = xRiskFreeRate + xPowerTerm;
xD1 = xD1 * xTime;
xD1 = xD1 + xLogTerm;
xDen = xVolatility * xSqrtTime;
xD1 = xD1 / xDen;
xD2 = xD1 - xDen;
d1 = xD1;
d2 = xD2;
NofXd1 = CNDF( d1 );
NofXd2 = CNDF( d2 );
FutureValueX = strike * ( exp( -(rate)*(time) ) );
if (otype == 0) {
OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
} else {
NegNofXd1 = (1.0 - NofXd1);
NegNofXd2 = (1.0 - NofXd2);
OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
}
return OptionPrice;
}
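// Quick consistency check (ours, not in PARSEC): the two branches above must
// satisfy European put-call parity, C - P = S - K*exp(-r*T). A sketch,
// assuming a double-valued fptype:
//
//      fptype c = BlkSchlsEqEuroNoDiv(S, K, r, v, T, 0, 0);
//      fptype p = BlkSchlsEqEuroNoDiv(S, K, r, v, T, 1, 0);
//      assert(fabs((c - p) - (S - K * exp(-r * T))) < 1e-6);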
#ifdef ENABLE_TBB
struct mainWork {
mainWork() {}
mainWork(mainWork &w, tbb::split) {}
void operator()(const tbb::blocked_range<int> &range) const {
fptype price;
int begin = range.begin();
int end = range.end();
for (int i=begin; i!=end; i++) {
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
rate[i], volatility[i], otime[i],
otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
fptype priceDelta = data_st[i].DGrefval - price;
if( fabs(priceDelta) >= 1e-5 ){
fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
i, price, data_st[i].DGrefval, priceDelta);
numError ++;
}
#endif
}
}
};
#endif // ENABLE_TBB
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
int bs_thread(void *tid_ptr) {
int j;
tbb::affinity_partitioner a;
mainWork doall;
for (j=0; j<NUM_RUNS; j++) {
tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
}
return 0;
}
#else // !ENABLE_TBB
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
int bs_thread(void *tid_ptr) {
#endif
int i, j;
fptype price;
fptype priceDelta;
    int tid = *(int *)tid_ptr;
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);
    // hand the division remainder to the last thread so no option is skipped
    // when numOptions is not a multiple of nThreads
    if (tid == nThreads - 1) end = numOptions;
for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
for (i=0; i<numOptions; i++) {
#else //ENABLE_OPENMP
for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
rate[i], volatility[i], otime[i],
otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
priceDelta = data_st[i].DGrefval - price;
if( fabs(priceDelta) >= 1e-4 ){
printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
i, price, data_st[i].DGrefval, priceDelta);
numError ++;
}
#endif
}
}
return 0;
}
#endif //ENABLE_TBB
int main (int argc, char **argv)
{
FILE *file;
int i;
int loopnum;
fptype * buffer;
int * buffer2;
int rv;
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif //PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_blackscholes);
#endif
if (argc != 4)
{
printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
exit(1);
}
nThreads = atoi(argv[1]);
char *inputFile = argv[2];
char *outputFile = argv[3];
//Read input data from file
file = fopen(inputFile, "r");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", inputFile);
exit(1);
}
rv = fscanf(file, "%i", &numOptions);
if(rv != 1) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
if(nThreads > numOptions) {
printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
nThreads = numOptions;
}
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
if(nThreads != 1) {
printf("Error: <nthreads> must be 1 (serial version)\n");
exit(1);
}
#endif
// alloc spaces for the option data
data_st = (OptionData*)malloc(numOptions*sizeof(OptionData));
prices = (fptype*)malloc(numOptions*sizeof(fptype));
for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
{
double s, strike, r, divq, v, t, divs, DGrefval;
rv = fscanf(file, "%lf %lf %lf %lf %lf %lf %c %lf %lf", &s, &strike, &r, &divq, &v, &t, &data_st[loopnum].OptionType, &divs, &DGrefval);
data_st[loopnum].s = s; data_st[loopnum].strike = strike; data_st[loopnum].r = r; data_st[loopnum].divq = divq; data_st[loopnum].v = v;
data_st[loopnum].t = t; data_st[loopnum].divs = divs; data_st[loopnum].DGrefval = DGrefval;
if(rv != 9) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", inputFile);
exit(1);
}
#ifdef ENABLE_THREADS
MAIN_INITENV(,8000000,nThreads);
#endif
printf("Num of Options: %d\n", numOptions);
printf("Num of Runs: %d\n", NUM_RUNS);
#define PAD 256
#define LINESIZE 64
buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
strike = sptprice + numOptions;
rate = strike + numOptions;
volatility = rate + numOptions;
otime = volatility + numOptions;
  // allocate with sizeof(int): otype is an int array, and a 2-byte fptype
  // such as Fp16 would make the original sizeof(fptype) under-allocate it
  buffer2 = (int *) malloc(numOptions * sizeof(int) + PAD);
otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));
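  // The PAD/LINESIZE arithmetic above first advances each base pointer by
  // PAD (256) bytes and then masks it down to a 64-byte boundary, so every
  // array starts on its own cache line while staying inside the allocation.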
for (i=0; i<numOptions; i++) {
otype[i] = (data_st[i].OptionType == 'P') ? 1 : 0;
sptprice[i] = data_st[i].s;
strike[i] = data_st[i].strike;
rate[i] = data_st[i].r;
volatility[i] = data_st[i].v;
otime[i] = data_st[i].t;
}
printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
#ifdef ENABLE_THREADS
#ifdef WIN32
HANDLE *threads;
int *nums;
threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE));
nums = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
nums[i] = i;
threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
}
WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
free(threads);
free(nums);
#else
int *tids;
tids = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
tids[i]=i;
CREATE_WITH_ARG(bs_thread, &tids[i]);
}
WAIT_FOR_END(nThreads);
free(tids);
#endif //WIN32
#else //ENABLE_THREADS
#ifdef ENABLE_OPENMP
{
int tid=0;
omp_set_num_threads(nThreads);
bs_thread(&tid);
}
#else //ENABLE_OPENMP
#ifdef ENABLE_TBB
tbb::task_scheduler_init init(nThreads);
int tid=0;
bs_thread(&tid);
#else //ENABLE_TBB
//serial version
int tid=0;
bs_thread(&tid);
#endif //ENABLE_TBB
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
//Write prices to output file
file = fopen(outputFile, "w");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", outputFile);
exit(1);
}
rv = fprintf(file, "%i\n", numOptions);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
for(i=0; i<numOptions; i++) {
rv = fprintf(file, "%.18f\n", prices[i]);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", outputFile);
exit(1);
}
#ifdef ERR_CHK
printf("Num Errors: %d\n", numError);
#endif
free(data_st);
free(prices);
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
return 0;
}
|
GB_binop__plus_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__plus_uint32
// A.*B function (eWiseMult): GB_AemultB__plus_uint32
// A*D function (colscale): GB_AxD__plus_uint32
// D*A function (rowscale): GB_DxB__plus_uint32
// C+=B function (dense accum): GB_Cdense_accumB__plus_uint32
// C+=b function (dense accum): GB_Cdense_accumb__plus_uint32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_uint32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_uint32
// C=scalar+B GB_bind1st__plus_uint32
// C=scalar+B' GB_bind1st_tran__plus_uint32
// C=A+scalar GB_bind2nd__plus_uint32
// C=A'+scalar GB_bind2nd_tran__plus_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij + bij)
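// For reference, a user-level call that might reach the eWiseAdd kernel below
// is sketched here (GrB_Matrix_eWiseAdd_BinaryOp and GrB_PLUS_UINT32 are
// standard GraphBLAS names; the exact dispatch path is our assumption):
//
//      GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_PLUS_UINT32, A, B, NULL) ;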
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x + y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT32 || GxB_NO_PLUS_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__plus_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__plus_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__plus_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__plus_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__plus_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
GrB_Info GB_bind1st_tran__plus_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
GrB_Info GB_bind2nd_tran__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pre_processing.h | #pragma once
#include "util/primitives/primitives.h"
#include "util/graph/graph.h"
#ifndef NO_ATOMIC
#include "util/ips4o/ips4o.hpp"
#endif
template<typename T, typename OFF>
T RemoveDuplicates(pair<T, T> *&edge_lst, OFF &num_edges, pair<T, T> *&edge_lst_buffer) {
using Edge = pair<T, T>;
Timer timer;
T max_node_id = 0;
T num_buckets;
auto max_omp_threads = omp_get_max_threads();
OFF *bucket_ptrs;
OFF *cur_write_off;
vector<OFF> histogram;
#pragma omp parallel num_threads(max_omp_threads)
{
#pragma omp for reduction(max: max_node_id)
for (OFF i = 0u; i < num_edges; i++) {
if (edge_lst[i].first > edge_lst[i].second) {
swap(edge_lst[i].first, edge_lst[i].second);
}
max_node_id = max(max_node_id, max(edge_lst[i].first, edge_lst[i].second));
}
#pragma omp single
{
num_buckets = max_node_id + 1;
}
// Partition.
BucketSort(histogram, edge_lst, edge_lst_buffer, cur_write_off, bucket_ptrs, num_edges, num_buckets,
[&edge_lst](size_t i) {
return edge_lst[i].first;
}, &timer);
// Sort.
#pragma omp for schedule(dynamic, 600)
        for (T i = 0; i < num_buckets; i++) { // T matches num_buckets, avoiding a signed/unsigned mismatch
sort(edge_lst_buffer + bucket_ptrs[i], edge_lst_buffer + bucket_ptrs[i + 1],
[](const Edge &left, const Edge &right) {
return left.second < right.second;
});
}
}
swap(edge_lst, edge_lst_buffer);
free(cur_write_off);
free(bucket_ptrs);
log_info("Finish Sort, %.9lfs", timer.elapsed());
// Selection.
auto *relative_off = (OFF *) malloc(sizeof(OFF) * num_edges);
#pragma omp parallel num_threads(max_omp_threads)
{
SelectNotFOMP(histogram, edge_lst_buffer, edge_lst, relative_off, num_edges, [edge_lst](size_t it) {
return edge_lst[it].first == edge_lst[it].second || (it > 0 && edge_lst[it - 1] == edge_lst[it]);
});
}
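    // The predicate above rejects self loops (u == v) and exact duplicates;
    // the preceding sort made equal edges adjacent, so one comparison with the
    // predecessor suffices. SelectNotFOMP compacts the survivors in parallel,
    // and relative_off accumulates the rejected count, which is why num_edges
    // shrinks by relative_off[num_edges - 1] below.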
swap(edge_lst, edge_lst_buffer);
num_edges = num_edges - relative_off[num_edges - 1];
free(relative_off);
log_info("New # of edges: %zu, Elapsed: %.9lfs", num_edges, timer.elapsed());
log_debug("max_node_id: %d", max_node_id);
return max_node_id;
}
template<typename T, typename D, typename I, typename OFF, typename F>
void EdgeListHistogram(I num_vertices, OFF num_edges, pair<T, T> *edge_lst, D *deg_lst, F f) {
auto local_buf = (uint8_t *) calloc(num_vertices, sizeof(uint8_t));
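    // Contention-avoidance scheme: each thread counts into its private byte
    // array and only touches the shared deg_lst with an atomic add when a
    // counter saturates at 0xff, then flushes the remainders once at the end.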
#pragma omp for
for (size_t i = 0u; i < num_edges; i++) {
if (f(i)) {
auto src = edge_lst[i].first;
auto dst = edge_lst[i].second;
local_buf[src]++;
if (local_buf[src] == 0xff) {
                __sync_fetch_and_add(&deg_lst[src], 0xff);
local_buf[src] = 0;
}
local_buf[dst]++;
if (local_buf[dst] == 0xff) {
                __sync_fetch_and_add(&deg_lst[dst], 0xff);
local_buf[dst] = 0;
}
}
}
for (size_t i = 0u; i < num_vertices; i++) {
// atomic add for edge.first
if (local_buf[i] > 0)
__sync_fetch_and_add(&(deg_lst[i]), local_buf[i]);
}
free(local_buf);
#pragma omp barrier
}
template<typename T, typename D, typename I, typename OFF>
void EdgeListHistogram(I num_vertices, OFF num_edges, pair<T, T> *edge_lst, D *deg_lst) {
EdgeListHistogram(num_vertices, num_edges, edge_lst, deg_lst, [](size_t it) {
return true;
});
}
template<typename T, typename OFF>
void ConvertEdgeListToCSR(OFF num_edges, pair<T, T> *edge_lst,
uint32_t num_vertices, uint32_t *°_lst, OFF *&off,
int32_t *&adj_lst, int max_omp_threads) {
Timer convert_timer;
deg_lst = (uint32_t *) malloc(sizeof(uint32_t) * (num_vertices + 1));
off = (OFF *) malloc(sizeof(OFF) * (num_vertices + 1));
auto cur_write_off = (OFF *) malloc(sizeof(OFF) * (num_vertices + 1));
vector<OFF> histogram;
#pragma omp parallel num_threads(max_omp_threads)
{
MemSetOMP(deg_lst, 0, num_vertices + 1);
MemSetOMP(off, 0, num_vertices + 1);
#pragma omp single
log_info("[%s]: InitTime: %.9lf s", __FUNCTION__, convert_timer.elapsed());
EdgeListHistogram(num_vertices, num_edges, edge_lst, deg_lst);
#pragma omp single
log_info("[%s]: Histogram Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
// PrefixSum.
        InclusivePrefixSumOMP(histogram, off + 1, num_vertices, [&deg_lst](uint32_t it) {
return deg_lst[it];
});
MemCpyOMP(cur_write_off, off, num_vertices + 1);
// Scatter.
#pragma omp single
{
if (adj_lst == nullptr) {
log_info("Allocate Inside (adj_lst)...");
adj_lst = (int32_t *) malloc(sizeof(int32_t) * off[num_vertices]);
}
log_info("[%s]: PrefixSum Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
}
#pragma omp for
for (size_t i = 0; i < num_edges; i++) {
auto src = edge_lst[i].first;
auto dst = edge_lst[i].second;
auto old_offset = __sync_fetch_and_add(&(cur_write_off[src]), 1);
adj_lst[old_offset] = dst;
old_offset = __sync_fetch_and_add(&(cur_write_off[dst]), 1);
adj_lst[old_offset] = src;
}
}
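    // Each undirected edge was scattered twice (src->dst and dst->src); the
    // atomic fetch-and-add on cur_write_off hands every writer a unique slot,
    // so adj_lst fills lock-free, at the cost of nondeterministic neighbor order.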
free(cur_write_off);
log_info("[%s]: Total Conversion Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
}
inline void Reorder(graph_t &g, vector<int32_t> &new_vid_dict, vector<int32_t> &old_vid_dict, int32_t *&new_adj) {
Timer timer;
new_vid_dict = vector<int32_t>(g.n);
using row_ptr_t = uint32_t ;
vector<row_ptr_t> new_off(g.n + 1);
new_off[0] = 0;
auto max_omp_threads = omp_get_max_threads();
auto histogram = vector<row_ptr_t>((max_omp_threads + 1) * CACHE_LINE_ENTRY, 0);
#pragma omp parallel num_threads(max_omp_threads)
{
// 1st CSR: new_off, new_adj
#pragma omp for
for (auto i = 0; i < g.n; i++) {
new_vid_dict[old_vid_dict[i]] = i;
}
InclusivePrefixSumOMP(histogram, &new_off.front() + 1, g.n, [&g, &old_vid_dict](uint32_t new_id) {
auto vertex = old_vid_dict[new_id];
return g.num_edges[vertex + 1] - g.num_edges[vertex];
});
#pragma omp single
log_info("[%s]: Finish PrefixSum Time: %.9lf s", __FUNCTION__, timer.elapsed_and_reset());
// 2nd Parallel Transform
#pragma omp for schedule(dynamic, 100)
for (auto i = 0; i < g.n; i++) {
auto origin_i = old_vid_dict[i];
// transform
auto cur_idx = new_off[i];
for (auto my_old_off = g.num_edges[origin_i]; my_old_off < g.num_edges[origin_i + 1]; my_old_off++) {
new_adj[cur_idx] = new_vid_dict[g.adj[my_old_off]];
cur_idx++;
}
// sort the local ranges
sort(new_adj + new_off[i], new_adj + new_off[i + 1]);
}
MemCpyOMP(g.num_edges, &new_off.front(), (g.n + 1));
}
swap(g.adj, new_adj);
log_info("[%s]: Finish Reorder Time: %.3lf s", __FUNCTION__, timer.elapsed());
}
inline void ReorderDegDescending(graph_t &g, vector<int32_t> &new_vid_dict, vector<int32_t> &old_vid_dict,
int32_t *&new_adj) {
Timer timer;
#ifdef NO_ATOMIC
#define USE_BUCKET_SORT
#endif
#ifdef USE_BUCKET_SORT
auto max_omp_threads = omp_get_max_threads();
auto max_deg = 0;
auto *old_vid_dict_buffer = (int32_t *) malloc(sizeof(int32_t) * g.n);
uint32_t *write_off = nullptr;
uint32_t *bucket_ptrs = nullptr;
auto histogram = vector<uint32_t>((max_omp_threads + 1) * CACHE_LINE_ENTRY, 0);
#pragma omp parallel num_threads(max_omp_threads)
{
#pragma omp for reduction(max: max_deg)
for (auto i = 0; i < g.n; i++) {
max_deg = max<int>(max_deg, g.num_edges[i + 1] - g.num_edges[i]);
}
#pragma omp single nowait
{
old_vid_dict = vector<int32_t>(g.n);
}
#pragma omp for
for (auto i = 0u; i < g.n; i++) {
old_vid_dict_buffer[i] = i;
}
auto ptr = &old_vid_dict[0];
BucketSortSmallBuckets(histogram, old_vid_dict_buffer, ptr, write_off, bucket_ptrs,
g.n, max_deg + 1, [&g, old_vid_dict_buffer, max_deg](int i) {
auto u = old_vid_dict_buffer[i];
return max_deg - (g.num_edges[u + 1] - g.num_edges[u]);
});
}
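    // Keying each vertex by (max_deg - degree) makes bucket 0 the
    // highest-degree vertices, so the bucket sort directly emits a
    // degree-descending permutation without any comparison sort.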
free(write_off);
free(bucket_ptrs);
free(old_vid_dict_buffer);
#else
log_info("Use parallel sort (parasort)");
old_vid_dict = vector<int32_t>(g.n);
#pragma omp parallel for
for (auto i = 0; i < g.n; i++) {
old_vid_dict[i] = i;
}
log_info("Allocation time: %.9lf s", timer.elapsed());
ips4o::parallel::sort(old_vid_dict.begin(), old_vid_dict.end(),
[&g](int l, int r) -> bool {
return g.num_edges[l + 1] - g.num_edges[l] > g.num_edges[r + 1] - g.num_edges[r];
});
#endif
log_info("Deg-descending time: %.9lf s", timer.elapsed());
Reorder(g, new_vid_dict, old_vid_dict, new_adj);
}
|
omp_group4.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
struct rusage r_usage;
int quantaslinhas(char *file){
// getc() returns int so EOF can be distinguished from valid characters
int c;
int tamanho = 1;
FILE* filepointer = fopen(file,"r");
if (filepointer == NULL)
{
printf("Can't open/find file\n");
return -1;
}
// count newlines; a file with N newlines has N+1 lines
while ((c = getc(filepointer)) != EOF)
{
if (c == '\n')
{
tamanho++;
}
}
fclose(filepointer);
return tamanho;
}
int main(int argc,char *argv[])
{
double time = 0.0;
// omp_get_wtime() measures wall-clock time; clock() sums CPU time over all
// OpenMP threads and would overstate the parallel runtime
double init = omp_get_wtime();
int i,j,k;
int tamanho;
if (argc < 4)
{
printf("Usage: %s <matrixA> <matrixB> <output>\n", argv[0]);
return 1;
}
tamanho = quantaslinhas(argv[1]);
if (tamanho <= 0)
{
return 1;
}
int matriz1[tamanho][tamanho];
int matriz2[tamanho][tamanho];
int matrizfinal[tamanho][tamanho];
char output1[tamanho];
char output2[tamanho];
int linhas = 0;
int colunas = 0;
FILE* filepointer1 = fopen(argv[1],"r");
FILE* filepointer2 = fopen(argv[2],"r");
FILE* filepointer3 = fopen(argv[3],"w");
for (linhas = 0; linhas < tamanho; linhas++)
{
for(colunas = 0; colunas < tamanho; colunas++)
{
fscanf(filepointer1,"%s",output1);
fscanf(filepointer2,"%s",output2);
matriz1[linhas][colunas] = atoi(output1);
matriz2[linhas][colunas] = atoi(output2);
}
}
fclose(filepointer1);
fclose(filepointer2);
// matrix multiplication; "#pragma omp for" outside a parallel region is a
// no-op, so the loop needs "parallel for", and the accumulator must be
// loop-local to avoid a data race between threads
#pragma omp parallel for private(j, k)
for(i = 0;i < tamanho ;i++){
for(j = 0;j < tamanho ;j++){
int soma = 0;
for(k = 0;k < tamanho ;k++){
soma = soma + matriz1[i][k] * matriz2[k][j];
}
matrizfinal[i][j] = soma;
}
}
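// With larger matrices, "#pragma omp parallel for collapse(2) private(k)"
// would expose tamanho*tamanho iterations to the scheduler; for the small
// inputs here the single-level parallel for above is typically sufficient.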
// the output rows must be written in order, so this loop stays sequential
for(i = 0;i < tamanho ;i++){
for(j = 0;j < tamanho ;j++){
if (j == tamanho -1){
fprintf(filepointer3, "%d",matrizfinal[i][j]);
}
else{
fprintf(filepointer3, "%d ",matrizfinal[i][j]);
}
}
if (i == tamanho -1){
continue;
}
else{
fputs("\n",filepointer3);
}
}
fclose(filepointer3);
time = omp_get_wtime() - init;
FILE* compare = fopen("compare.txt","a");
getrusage(RUSAGE_SELF,&r_usage);
fprintf(compare, "Omp: Time - %f Max RSS - %ld\n",time,r_usage.ru_maxrss);
fclose(compare);
return 0;
}
|
convolution_sgemm_pack8to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack8to4_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
if (size >= 16)
tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#else // __ARM_FEATURE_DOTPROD
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif // __ARM_FEATURE_DOTPROD
#else // __aarch64__
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif // __aarch64__
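    // tmp packs columns into tiles of 16/8/4/2/1 (dotprod), 4/2/1 (plain
    // aarch64) or 2/1 (armv7); a column index i therefore maps to channel
    // i/16 + (i%16)/8 + (i%8)/4 + (i%4)/2 + i%2 on the widest path, and the
    // truncated forms of the same formula appear throughout below.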
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
int nn_size = size >> 4;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 16;
signed char* tmpptr = tmp.channel(i / 16);
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
// split pack8 to pack4
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld2 {v0.4s, v1.4s}, [%0], #32 \n"
"ld2 {v2.4s, v3.4s}, [%0], #32 \n"
"ld2 {v4.4s, v5.4s}, [%0], #32 \n"
"ld2 {v6.4s, v7.4s}, [%0] \n"
"sub %0, %0, #96 \n"
"st1 {v0.16b}, [%1], #16 \n"
"st1 {v2.16b}, [%1], #16 \n"
"st1 {v4.16b}, [%1], #16 \n"
"st1 {v6.16b}, [%1], #16 \n"
"st1 {v1.16b}, [%1], #16 \n"
"st1 {v3.16b}, [%1], #16 \n"
"st1 {v5.16b}, [%1], #16 \n"
"st1 {v7.16b}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 4;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld2 {v0.4s, v1.4s}, [%0], #32 \n"
"ld2 {v2.4s, v3.4s}, [%0] \n"
"sub %0, %0, #32 \n"
"st1 {v0.16b}, [%1], #16 \n"
"st1 {v2.16b}, [%1], #16 \n"
"st1 {v1.16b}, [%1], #16 \n"
"st1 {v3.16b}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#else // __ARM_FEATURE_DOTPROD
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 2;
#endif // __ARM_FEATURE_DOTPROD
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
signed char* tmpptr = tmp.channel(i / 4);
#endif
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
#if __ARM_FEATURE_DOTPROD
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld2 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.16b, v1.16b}, [%0] \n"
"st1 {v0.16b, v1.16b}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#endif // __ARM_FEATURE_DOTPROD
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
signed char* tmpptr = tmp.channel(i / 2);
#endif
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld2 {v0.2s, v1.2s}, [%0] \n"
"st1 {v0.2s, v1.2s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.16b}, [%0] \n"
"st1 {v0.16b}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#endif // __ARM_FEATURE_DOTPROD
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.s8 {d0-d1}, [%0 :64] \n"
"vst1.s8 {d0-d1}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.8b}, [%0] \n"
"st1 {v0.8b}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0 :64] \n"
"vst1.s8 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0");
#endif
img0 += size * 8;
}
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
for (; i + 15 < size; i += 16)
{
const signed char* tmpptr = tmp.channel(i / 16);
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v24.16b}, [%3], #16 \n" // _w0123_l
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"ld1 {v16.16b}, [%2], #16 \n" // _val0123_l
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"0: \n"
"ld1 {v17.16b}, [%2], #16 \n" // _val4567_l
"sdot v0.4s, v24.16b, v16.4b[0] \n"
"sdot v1.4s, v24.16b, v16.4b[1] \n"
"sdot v2.4s, v24.16b, v16.4b[2] \n"
"sdot v3.4s, v24.16b, v16.4b[3] \n"
"ld1 {v18.16b}, [%2], #16 \n" // _val891011_l
"sdot v4.4s, v24.16b, v17.4b[0] \n"
"sdot v5.4s, v24.16b, v17.4b[1] \n"
"sdot v6.4s, v24.16b, v17.4b[2] \n"
"sdot v7.4s, v24.16b, v17.4b[3] \n"
"ld1 {v19.16b}, [%2], #16 \n" // _val12131415_l
"sdot v8.4s, v24.16b, v18.4b[0] \n"
"sdot v9.4s, v24.16b, v18.4b[1] \n"
"ld1 {v25.16b}, [%3], #16 \n" // _w0123_h
"sdot v10.4s, v24.16b, v18.4b[2] \n"
"sdot v11.4s, v24.16b, v18.4b[3] \n"
"ld1 {v20.16b}, [%2], #16 \n" // _val0123_h
"sdot v12.4s, v24.16b, v19.4b[0] \n"
"sdot v13.4s, v24.16b, v19.4b[1] \n"
"sdot v14.4s, v24.16b, v19.4b[2] \n"
"sdot v15.4s, v24.16b, v19.4b[3] \n"
"ld1 {v21.16b}, [%2], #16 \n" // _val4567_h
"sdot v0.4s, v25.16b, v20.4b[0] \n"
"sdot v1.4s, v25.16b, v20.4b[1] \n"
"sdot v2.4s, v25.16b, v20.4b[2] \n"
"sdot v3.4s, v25.16b, v20.4b[3] \n"
"ld1 {v22.16b}, [%2], #16 \n" // _val891011_h
"sdot v4.4s, v25.16b, v21.4b[0] \n"
"sdot v5.4s, v25.16b, v21.4b[1] \n"
"sdot v6.4s, v25.16b, v21.4b[2] \n"
"sdot v7.4s, v25.16b, v21.4b[3] \n"
"ld1 {v23.16b}, [%2], #16 \n" // _val12131415_h
"sdot v8.4s, v25.16b, v22.4b[0] \n"
"sdot v9.4s, v25.16b, v22.4b[1] \n"
"ld1 {v24.16b}, [%3], #16 \n" // _w0123_l
"sdot v10.4s, v25.16b, v22.4b[2] \n"
"sdot v11.4s, v25.16b, v22.4b[3] \n"
"ld1 {v16.16b}, [%2], #16 \n" // _val0123_l
"sdot v12.4s, v25.16b, v23.4b[0] \n"
"sdot v13.4s, v25.16b, v23.4b[1] \n"
"subs %w1, %w1, #1 \n"
"sdot v14.4s, v25.16b, v23.4b[2] \n"
"sdot v15.4s, v25.16b, v23.4b[3] \n"
"bne 0b \n"
"sub %2, %2, #16 \n"
"sub %3, %3, #16 \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%0], #64 \n"
: "=r"(outptr0),
"=r"(nn),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(nn),
"2"(tmpptr),
"3"(kptr0)
: "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 32);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 48);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3);
tmpptr += 64;
kptr0 += 32;
}
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr0 + 4, _sum1);
vst1q_s32(outptr0 + 8, _sum2);
vst1q_s32(outptr0 + 12, _sum3);
vst1q_s32(outptr0 + 16, _sum4);
vst1q_s32(outptr0 + 20, _sum5);
vst1q_s32(outptr0 + 24, _sum6);
vst1q_s32(outptr0 + 28, _sum7);
outptr0 += 32;
}
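        // In the intrinsics loop above, each vdotq_laneq_s32 accumulates a
        // 4-way int8 dot product of one weight register against one broadcast
        // input lane, so every j iteration advances all 8 columns x 4 output
        // channels at once.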
#endif
for (; i + 3 < size; i += 4)
{
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
const signed char* tmpptr = tmp.channel(i / 4);
#endif
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 16);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3);
tmpptr += 32;
kptr0 += 32;
}
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr0 + 4, _sum1);
vst1q_s32(outptr0 + 8, _sum2);
vst1q_s32(outptr0 + 12, _sum3);
outptr0 += 16;
#else // __ARM_FEATURE_DOTPROD
asm volatile(
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"prfm pldl1keep, [%2, #128] \n"
"prfm pldl1keep, [%3, #256] \n"
"lsr w4, %w1, #1 \n" // w4 = nn >> 1
"cmp w4, #0 \n"
"beq 1f \n"
"prfm pldl1keep, [%3, #512] \n"
"add x5, %2, #16 \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v16.16b}, [%2] \n" // val L H
"ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%3], #64 \n"
"add %2, %2, #32 \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"ld1 {v18.16b}, [%2] \n"
"add %2, %2, #32 \n"
"0: \n"
"smull v24.8h, v16.8b, v20.8b \n"
"prfm pldl1keep, [%3, #256] \n"
"smull2 v25.8h, v17.16b, v20.16b \n"
"prfm pldl1keep, [%3, #512] \n"
"smull v26.8h, v16.8b, v21.8b \n"
"subs w4, w4, #1 \n"
"smull2 v27.8h, v17.16b, v21.16b \n"
"ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L
"smlal v24.8h, v18.8b, v22.8b \n"
"smlal2 v25.8h, v19.16b, v22.16b \n"
"smlal v26.8h, v18.8b, v23.8b \n"
"smlal2 v27.8h, v19.16b, v23.16b \n"
"smull2 v29.8h, v16.16b, v20.16b \n"
"sadalp v0.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v1.4s, v25.8h \n"
"smull2 v31.8h, v16.16b, v21.16b \n"
"ld1 {v16.16b}, [x5] \n" // val L H
"smull v30.8h, v17.8b, v21.8b \n"
"add x5, x5, #32 \n"
"smlal2 v29.8h, v18.16b, v22.16b \n"
"sadalp v2.4s, v26.8h \n"
"smlal v28.8h, v19.8b, v22.8b \n"
"sadalp v3.4s, v27.8h \n"
"smlal2 v31.8h, v18.16b, v23.16b \n"
"ld1 {v18.16b}, [x5] \n"
"smlal v30.8h, v19.8b, v23.8b \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"smull v24.8h, v16.8b, v20.8b \n"
"add x5, x5, #32 \n"
"smull2 v25.8h, v17.16b, v20.16b \n"
"prfm pldl1keep, [x5, #128] \n"
"smull v26.8h, v16.8b, v21.8b \n"
"prfm pldl1keep, [x5, #384] \n"
"smull2 v27.8h, v17.16b, v21.16b \n"
"ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L
"smlal v24.8h, v18.8b, v22.8b \n"
"sadalp v5.4s, v29.8h \n"
"smlal2 v25.8h, v19.16b, v22.16b \n"
"sadalp v4.4s, v28.8h \n"
"smlal v26.8h, v18.8b, v23.8b \n"
"sadalp v7.4s, v31.8h \n"
"smlal2 v27.8h, v19.16b, v23.16b \n"
"sadalp v6.4s, v30.8h \n"
"smull2 v29.8h, v16.16b, v20.16b \n"
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull2 v31.8h, v16.16b, v21.16b \n"
"ld1 {v16.16b}, [%2] \n" // val L H
"smull v30.8h, v17.8b, v21.8b \n"
"add %2, %2, #32 \n"
"smlal2 v29.8h, v18.16b, v22.16b \n"
"sadalp v10.4s, v26.8h \n"
"smlal v28.8h, v19.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smlal2 v31.8h, v18.16b, v23.16b \n"
"ld1 {v18.16b}, [%2] \n"
"smlal v30.8h, v19.8b, v23.8b \n"
"add %2, %2, #32 \n"
"ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%3], #64 \n"
"sadalp v13.4s, v29.8h \n"
"prfm pldl1keep, [%2, #128] \n"
"sadalp v12.4s, v28.8h \n"
"prfm pldl1keep, [%2, #384] \n"
"sadalp v15.4s, v31.8h \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"sadalp v14.4s, v30.8h \n"
"bne 0b \n"
"sub %2, %2, #64 \n"
"sub %3, %3, #64 \n"
"1: \n"
"and w4, %w1, #1 \n" // w4 = remain = nn & 1
"cmp w4, #0 \n" // w4 > 0
"beq 2f \n"
"ld1 {v16.8b, v17.8b}, [%2], #16 \n"
"ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%3], #32 \n"
"smull v24.8h, v16.8b, v20.8b \n"
"smull v25.8h, v16.8b, v21.8b \n"
"smull v26.8h, v16.8b, v22.8b \n"
"ld1 {v18.8b, v19.8b}, [%2], #16 \n"
"smull v27.8h, v16.8b, v23.8b \n"
"sadalp v0.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v1.4s, v25.8h \n"
"smull v29.8h, v17.8b, v21.8b \n"
"sadalp v2.4s, v26.8h \n"
"smull v30.8h, v17.8b, v22.8b \n"
"sadalp v3.4s, v27.8h \n"
"smull v31.8h, v17.8b, v23.8b \n"
"sadalp v4.4s, v28.8h \n"
"smull v24.8h, v18.8b, v20.8b \n"
"sadalp v5.4s, v29.8h \n"
"smull v25.8h, v18.8b, v21.8b \n"
"sadalp v6.4s, v30.8h \n"
"smull v26.8h, v18.8b, v22.8b \n"
"sadalp v7.4s, v31.8h \n"
"smull v27.8h, v18.8b, v23.8b \n"
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v19.8b, v20.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull v29.8h, v19.8b, v21.8b \n"
"sadalp v10.4s, v26.8h \n"
"smull v30.8h, v19.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smull v31.8h, v19.8b, v23.8b \n"
"sadalp v12.4s, v28.8h \n"
"sadalp v13.4s, v29.8h \n"
"sadalp v14.4s, v30.8h \n"
"sadalp v15.4s, v31.8h \n"
"2: \n"
"addp v0.4s, v0.4s, v1.4s \n"
"addp v2.4s, v2.4s, v3.4s \n"
"addp v4.4s, v4.4s, v5.4s \n"
"addp v6.4s, v6.4s, v7.4s \n"
"addp v8.4s, v8.4s, v9.4s \n"
"addp v10.4s, v10.4s, v11.4s \n"
"addp v12.4s, v12.4s, v13.4s \n"
"addp v14.4s, v14.4s, v15.4s \n"
"addp v0.4s, v0.4s, v2.4s \n"
"addp v1.4s, v4.4s, v6.4s \n"
"addp v2.4s, v8.4s, v10.4s \n"
"addp v3.4s, v12.4s, v14.4s \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
: "=r"(outptr0),
"=r"(nn),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(nn),
"2"(tmpptr),
"3"(kptr0)
: "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#endif // __ARM_FEATURE_DOTPROD
}
#endif // __aarch64__
for (; i + 1 < size; i += 2)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val01_l_h = vld1q_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val01_l_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val01_l_h, 1);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val01_l_h, 2);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val01_l_h, 3);
tmpptr += 16;
kptr0 += 32;
}
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr0 + 4, _sum1);
outptr0 += 8;
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum00 = vdupq_n_s32(0);
int32x4_t _sum01 = vdupq_n_s32(0);
int32x4_t _sum02 = vdupq_n_s32(0);
int32x4_t _sum03 = vdupq_n_s32(0);
int32x4_t _sum10 = vdupq_n_s32(0);
int32x4_t _sum11 = vdupq_n_s32(0);
int32x4_t _sum12 = vdupq_n_s32(0);
int32x4_t _sum13 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01));
int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23));
int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23));
int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01));
int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23));
int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23));
int8x16_t _w45 = vld1q_s8(kptr0 + 32);
int8x16_t _w67 = vld1q_s8(kptr0 + 48);
_wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45));
_wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45));
_wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67));
_wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67));
_wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45));
_wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45));
_wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67));
_wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67));
_sum00 = vpadalq_s16(_sum00, _wv00);
_sum01 = vpadalq_s16(_sum01, _wv01);
_sum02 = vpadalq_s16(_sum02, _wv02);
_sum03 = vpadalq_s16(_sum03, _wv03);
_sum10 = vpadalq_s16(_sum10, _wv10);
_sum11 = vpadalq_s16(_sum11, _wv11);
_sum12 = vpadalq_s16(_sum12, _wv12);
_sum13 = vpadalq_s16(_sum13, _wv13);
tmpptr += 32;
kptr0 += 64;
}
for (; j < nn; j++)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01));
int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23));
int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23));
int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01));
int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23));
int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23));
_sum00 = vpadalq_s16(_sum00, _wv00);
_sum01 = vpadalq_s16(_sum01, _wv01);
_sum02 = vpadalq_s16(_sum02, _wv02);
_sum03 = vpadalq_s16(_sum03, _wv03);
_sum10 = vpadalq_s16(_sum10, _wv10);
_sum11 = vpadalq_s16(_sum11, _wv11);
_sum12 = vpadalq_s16(_sum12, _wv12);
_sum13 = vpadalq_s16(_sum13, _wv13);
tmpptr += 16;
kptr0 += 32;
}
int32x4_t _s001 = vpaddq_s32(_sum00, _sum01);
int32x4_t _s023 = vpaddq_s32(_sum02, _sum03);
int32x4_t _s101 = vpaddq_s32(_sum10, _sum11);
int32x4_t _s123 = vpaddq_s32(_sum12, _sum13);
int32x4_t _s00123 = vpaddq_s32(_s001, _s023);
int32x4_t _s10123 = vpaddq_s32(_s101, _s123);
vst1q_s32(outptr0, _s00123);
vst1q_s32(outptr0 + 4, _s10123);
outptr0 += 8;
#endif // __ARM_FEATURE_DOTPROD
#else // __aarch64__
asm volatile(
"veor q0, q0 \n"
"veor q1, q1 \n"
"veor q2, q2 \n"
"veor q3, q3 \n"
"veor q4, q4 \n"
"veor q5, q5 \n"
"veor q6, q6 \n"
"veor q7, q7 \n"
"pld [%2, #256] \n"
"lsr r4, %1, #1 \n" // r4 = nn = size >> 1
"cmp r4, #0 \n"
"beq 1f \n"
"add r5, %3, #16 \n"
"pld [%3, #128] \n"
"mov r6, #32 \n"
"pld [%3, #384] \n"
"vld1.s8 {d20-d21}, [%3 :128], r6 \n" // _w01
"vld1.s8 {d16-d19}, [%2 :128]! \n" // _val0 _val1
"vld1.s8 {d22-d23}, [%3 :128], r6 \n" // _w45
"0: \n"
"vmull.s8 q12, d16, d20 \n"
"pld [%2, #256] \n"
"vmull.s8 q13, d16, d21 \n"
"pld [%3, #384] \n"
"vmull.s8 q14, d17, d20 \n"
"vmull.s8 q15, d17, d21 \n"
"vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23
"vmlal.s8 q12, d18, d22 \n"
"vmlal.s8 q13, d18, d23 \n"
"subs r4, r4, #1 \n"
"vmlal.s8 q14, d19, d22 \n"
"vmlal.s8 q15, d19, d23 \n"
"vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67
"vpadal.s16 q0, q12 \n"
"vmull.s8 q12, d16, d20 \n"
"vpadal.s16 q1, q13 \n"
"vmull.s8 q13, d16, d21 \n"
"vpadal.s16 q4, q14 \n"
"vmull.s8 q14, d17, d20 \n"
"vpadal.s16 q5, q15 \n"
"vmull.s8 q15, d17, d21 \n"
"vld1.s8 {d16-d17}, [%2 :128]! \n" // _val0
"vmlal.s8 q12, d18, d22 \n"
"vld1.s8 {d20-d21}, [%3 :128], r6 \n" // _w01
"vmlal.s8 q13, d18, d23 \n"
"pld [r5, #128] \n"
"vmlal.s8 q14, d19, d22 \n"
"pld [r5, #384] \n"
"vmlal.s8 q15, d19, d23 \n"
"vld1.s8 {d18-d19}, [%2 :128]! \n" // _val1
"vpadal.s16 q2, q12 \n"
"vld1.s8 {d22-d23}, [%3 :128], r6 \n" // _w45
"vpadal.s16 q3, q13 \n"
"pld [%2, #128] \n"
"vpadal.s16 q6, q14 \n"
"pld [%3, #128] \n"
"vpadal.s16 q7, q15 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
"sub %3, %3, #64 \n"
"1: \n"
"and r4, %1, #1 \n" // r4 = remain = size & 1
"cmp r4, #0 \n" // r4 > 0
"beq 2f \n"
"vld1.s8 {d16-d17}, [%2 :128]! \n" // _val
"vld1.s8 {d20-d21}, [%3 :128]! \n" // _w01
"vmull.s8 q12, d16, d20 \n"
"vld1.s8 {d22-d23}, [%3 :128]! \n" // _w23
"vmull.s8 q13, d16, d21 \n"
"vmull.s8 q14, d17, d20 \n"
"vmull.s8 q15, d17, d21 \n"
"vpadal.s16 q0, q12 \n"
"vmull.s8 q12, d16, d22 \n"
"vpadal.s16 q1, q13 \n"
"vmull.s8 q13, d16, d23 \n"
"vpadal.s16 q4, q14 \n"
"vmull.s8 q14, d17, d22 \n"
"vpadal.s16 q5, q15 \n"
"vmull.s8 q15, d17, d23 \n"
"vpadal.s16 q2, q12 \n"
"vpadal.s16 q3, q13 \n"
"vpadal.s16 q6, q14 \n"
"vpadal.s16 q7, q15 \n"
"2: \n"
"vpadd.s32 d16, d0, d1 \n"
"vpadd.s32 d17, d2, d3 \n"
"vpadd.s32 d18, d4, d5 \n"
"vpadd.s32 d19, d6, d7 \n"
"vpadd.s32 d20, d8, d9 \n"
"vpadd.s32 d21, d10, d11 \n"
"vpadd.s32 d22, d12, d13 \n"
"vpadd.s32 d23, d14, d15 \n"
"vpadd.s32 d0, d16, d17 \n"
"vpadd.s32 d1, d18, d19 \n"
"vpadd.s32 d2, d20, d21 \n"
"vpadd.s32 d3, d22, d23 \n"
"vst1.s32 {d0-d3}, [%0 :128]! \n"
: "=r"(outptr0),
"=r"(nn),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(nn),
"2"(tmpptr),
"3"(kptr0)
: "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x8_t _val0_l_h = vld1_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1);
tmpptr += 8;
kptr0 += 32;
}
vst1q_s32(outptr0, _sum0);
outptr0 += 4;
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01));
int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01));
int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23));
int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23));
int8x16_t _w45 = vld1q_s8(kptr0 + 32);
int8x16_t _w67 = vld1q_s8(kptr0 + 48);
_wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45));
_wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45));
_wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67));
_wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67));
_sum0 = vpadalq_s16(_sum0, _wv0);
_sum1 = vpadalq_s16(_sum1, _wv1);
_sum2 = vpadalq_s16(_sum2, _wv2);
_sum3 = vpadalq_s16(_sum3, _wv3);
tmpptr += 16;
kptr0 += 64;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01));
int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01));
int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23));
int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23));
_sum0 = vpadalq_s16(_sum0, _wv0);
_sum1 = vpadalq_s16(_sum1, _wv1);
_sum2 = vpadalq_s16(_sum2, _wv2);
_sum3 = vpadalq_s16(_sum3, _wv3);
tmpptr += 8;
kptr0 += 32;
}
#if __aarch64__
int32x4_t _s01 = vpaddq_s32(_sum0, _sum1);
int32x4_t _s23 = vpaddq_s32(_sum2, _sum3);
int32x4_t _s0123 = vpaddq_s32(_s01, _s23);
#else
int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1));
int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3));
int32x4_t _s0123 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high));
#endif
vst1q_s32(outptr0, _s0123);
outptr0 += 4;
#endif // __ARM_FEATURE_DOTPROD
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack8to4_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 8a-4b-maxk-inch/8a-outch/4b
// dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82)
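// illustration (assumed from the loops below, non-dotprod path): for each k,
// one 32-byte block stores w[out0][in0..7] w[out1][in0..7] w[out2][in0..7]
// w[out3][in0..7], so a single vld1q_s8 yields two output channels' weights
// for all 8 input channels at once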
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(32 * maxk, inch / 8, outch / 4, 1u);
for (int q = 0; q + 3 < outch; q += 4)
{
signed char* g00 = kernel_tm.channel(q / 4);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
#if __ARM_FEATURE_DOTPROD
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
for (int i = 0; i < 4; i++)
{
for (int j = 4; j < 8; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
#else
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
#endif
}
}
}
}
static void convolution_im2col_sgemm_pack8to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
{
const int gap = (w * stride_h - outw * stride_w) * 8;
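// gap: int8 elements to skip so the source pointer moves from the end of
// one output row to the start of the next (stride_h input rows down)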
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
signed char* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v * 8;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
int8x8_t _val0 = vld1_s8(sptr);
int8x8_t _val1 = vld1_s8(sptr + stride_w * 8);
int8x8_t _val2 = vld1_s8(sptr + stride_w * 16);
int8x8_t _val3 = vld1_s8(sptr + stride_w * 24);
vst1_s8(ptr, _val0);
vst1_s8(ptr + 8, _val1);
vst1_s8(ptr + 16, _val2);
vst1_s8(ptr + 24, _val3);
sptr += stride_w * 32;
ptr += 32;
}
for (; j + 1 < outw; j += 2)
{
int8x8_t _val0 = vld1_s8(sptr);
int8x8_t _val1 = vld1_s8(sptr + stride_w * 8);
vst1_s8(ptr, _val0);
vst1_s8(ptr + 8, _val1);
sptr += stride_w * 16;
ptr += 16;
}
for (; j < outw; j++)
{
int8x8_t _val = vld1_s8(sptr);
vst1_s8(ptr, _val);
sptr += stride_w * 8;
ptr += 8;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack8to4_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
|
pzmemory.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*
* -- SuperLU MT routine (version 2.2) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley,
* and Xerox Palo Alto Research Center.
* September 10, 2007
*
* Last modified:
* -- 8/29/2013: added lock to access Stack memory supplied by user
*
*/
#include "slu_mt_zdefs.h"
/* ------------------
Constants & Macros
------------------ */
#define EXPAND 1.5
#define NO_MEMTYPE 4 /* 0: lusup;
1: ucol;
2: lsub;
3: usub */
#define GluIntArray(n) (9 * (n) + 5)
/* -------------------
Internal prototypes
------------------- */
void *pzgstrf_expand (int_t *, MemType,int_t, int_t, GlobalLU_t *);
void copy_mem_doublecomplex (int_t, void *, void *);
void pzgstrf_StackCompress(GlobalLU_t *);
void pzgstrf_SetupSpace (void *, int_t);
void *zuser_malloc (int_t, int_t);
void zuser_free (int_t, int_t);
/* ----------------------------------------------
External prototypes (in memory.c - prec-indep)
---------------------------------------------- */
extern void copy_mem_int (int_t, void *, void *);
extern void user_bcopy (char *, char *, int_t);
typedef struct {
int_t size;
int_t used;
int_t top1; /* grow upward, relative to &array[0] */
int_t top2; /* grow downward */
void *array;
#if ( MACH==PTHREAD )
pthread_mutex_t lock;
#endif
} LU_stack_t;
typedef enum {HEAD, TAIL} stack_end_t;
typedef enum {SYSTEM, USER} LU_space_t;
ExpHeader *zexpanders = 0; /* Array of pointers to 4 types of memory */
static LU_stack_t stack;
static int_t no_expand;
static int_t ndim;
static LU_space_t whichspace; /* 0 - system malloc'd; 1 - user provided */
/* Macros to manipulate stack */
#define StackFull(x) ( x + stack.used >= stack.size )
#define NotDoubleAlign(addr) ( (long long int)addr & 7 )
#define DoubleAlign(addr) ( ((long long int)addr + 7) & ~7L )
#define Reduce(alpha) ((alpha + 1) / 2) /* i.e. (alpha-1)/2 + 1 */
/* temporary space used by BLAS calls */
#define NUM_TEMPV(n,w,t,b) (SUPERLU_MAX( 2*n, (t + b)*w ))
/*
* Setup the memory model to be used for factorization.
* lwork = 0: use system malloc;
* lwork > 0: use user-supplied work[] space.
*/
void pzgstrf_SetupSpace(void *work, int_t lwork)
{
if ( lwork == 0 ) {
whichspace = SYSTEM; /* malloc/free */
} else if ( lwork > 0 ) {
whichspace = USER; /* user provided space */
stack.size = lwork;
stack.used = 0;
stack.top1 = 0;
stack.top2 = lwork;
stack.array = (void *) work;
}
#if ( MACH==PTHREAD )
pthread_mutex_init ( &stack.lock, NULL);
#endif
}
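/*
 * Usage sketch (illustrative only, not part of the SuperLU_MT API):
 * supplying a user arena instead of system malloc.
 *
 *     int_t lwork = 64 * 1024 * 1024;               // 64 MB arena
 *     void *work = SUPERLU_MALLOC((size_t) lwork);  // caller-owned buffer
 *     pzgstrf_SetupSpace(work, lwork);              // whichspace = USER
 *     ...                                           // factorize
 *     pzgstrf_StackFree();                          // destroy the stack lock
 *     SUPERLU_FREE(work);
 */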
/*
* Destroy the lock used for user stack memory.
*/
void pzgstrf_StackFree()
{
#if ( MACH==PTHREAD ) /* Use pthread ... */
if ( whichspace == USER )
pthread_mutex_destroy( &stack.lock );
#endif
}
void *zuser_malloc(int_t bytes, int_t which_end)
{
void *buf;
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
if ( StackFull(bytes) ) {
buf = NULL;
goto end;
}
if ( which_end == HEAD ) {
buf = (char*) stack.array + stack.top1;
stack.top1 += bytes;
} else {
stack.top2 -= bytes;
buf = (char*) stack.array + stack.top2;
}
stack.used += bytes;
end: ;
} /* ---- end critical section ---- */
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
return buf;
}
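/*
 * Worked example (illustrative): with stack.size = 100 and an empty stack,
 * zuser_malloc(24, HEAD) returns &array[0] and sets top1 = 24;
 * zuser_malloc(16, TAIL) sets top2 = 84 and returns &array[84]; used is
 * now 40, and any further request of x bytes fails when x + used >= size.
 */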
void zuser_free(int_t bytes, int_t which_end)
{
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
if ( which_end == HEAD ) stack.top1 -= bytes;
else stack.top2 += bytes;
stack.used -= bytes;
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
/* Returns the working storage used during factorization */
int_t superlu_zTempSpace(int_t n, int_t w, int_t p)
{
register float tmp, ptmp;
register int_t iword = sizeof(int_t), dword = sizeof(doublecomplex);
int_t maxsuper = sp_ienv(3), rowblk = sp_ienv(4);
/* globally shared */
tmp = 14 * n * iword;
/* local to each processor */
ptmp = (2 * w + 5 + NO_MARKER) * n * iword;
ptmp += (n * w + NUM_TEMPV(n,w,maxsuper,rowblk)) * dword;
#if ( PRNTlevel>=1 )
printf("Per-processor work[] %.0f MB\n", ptmp/1024/1024);
#endif
ptmp *= p;
return (tmp + ptmp);
}
/*
* superlu_memusage consists of the following fields:
* o for_lu (float)
* The amount of space used in bytes for L\U data structures.
* o total_needed (float)
* The amount of space needed in bytes to perform factorization.
* o expansions (int)
* The number of memory expansions during the LU factorization.
*/
int_t superlu_zQuerySpace(int_t P, SuperMatrix *L, SuperMatrix *U, int_t panel_size,
superlu_memusage_t *superlu_memusage)
{
SCPformat *Lstore;
NCPformat *Ustore;
register int_t n, iword, dword, lwork;
Lstore = L->Store;
Ustore = U->Store;
n = L->ncol;
iword = sizeof(int_t);
dword = sizeof(doublecomplex);
/* L supernodes of type SCP */
superlu_memusage->for_lu = (float) (7*n + 3) * iword
+ (float) Lstore->nzval_colend[n-1] * dword
+ (float) Lstore->rowind_colend[n-1] * iword;
/* U columns of type NCP */
superlu_memusage->for_lu += (2*n + 1) * iword
+ (float) Ustore->colend[n-1] * (dword + iword);
/* Working storage to support factorization */
lwork = superlu_zTempSpace(n, panel_size, P);
superlu_memusage->total_needed = superlu_memusage->for_lu + lwork;
superlu_memusage->expansions = --no_expand;
return 0;
}
float pzgstrf_memory_use(const int_t nzlmax, const int_t nzumax, const int_t nzlumax)
{
register float iword, dword, t;
iword = sizeof(int_t);
dword = sizeof(doublecomplex);
t = 10. * ndim * iword + nzlmax * iword + nzumax * (iword + dword)
+ nzlumax * dword;
return t;
}
/*
* Allocate storage for the data structures common to all factor routines.
* For those unpredictable size, make a guess as FILL * nnz(A).
* Return value:
* If lwork = -1, return the estimated amount of space required;
* otherwise, return the amount of space actually allocated when
* memory allocation failure occurred.
*/
float
pzgstrf_MemInit(int_t n, int_t annz, superlumt_options_t *superlumt_options,
SuperMatrix *L, SuperMatrix *U, GlobalLU_t *Glu)
{
register int_t nprocs = superlumt_options->nprocs;
yes_no_t refact = superlumt_options->refact;
register int_t panel_size = superlumt_options->panel_size;
register int_t lwork = superlumt_options->lwork;
void *work = superlumt_options->work;
int_t iword, dword, retries = 0;
SCPformat *Lstore;
NCPformat *Ustore;
int_t *xsup, *xsup_end, *supno;
int_t *lsub, *xlsub, *xlsub_end;
doublecomplex *lusup;
int_t *xlusup, *xlusup_end;
doublecomplex *ucol;
int_t *usub, *xusub, *xusub_end;
int_t nzlmax, nzumax, nzlumax;
int_t FILL_LUSUP = sp_ienv(6); /* Guess the fill-in growth for LUSUP */
int_t FILL_UCOL = sp_ienv(7); /* Guess the fill-in growth for UCOL */
int_t FILL_LSUB = sp_ienv(8); /* Guess the fill-in growth for LSUB */
no_expand = 0;
ndim = n;
iword = sizeof(int_t);
dword = sizeof(doublecomplex);
if ( !zexpanders )
zexpanders = (ExpHeader *) SUPERLU_MALLOC(NO_MEMTYPE * sizeof(ExpHeader));
if ( refact == NO ) {
/* Guess amount of storage needed by L\U factors. */
if ( FILL_UCOL < 0 ) nzumax = -FILL_UCOL * annz;
else nzumax = FILL_UCOL;
if ( FILL_LSUB < 0 ) nzlmax = -FILL_LSUB * annz;
else nzlmax = FILL_LSUB;
if ( Glu->dynamic_snode_bound == YES ) {
if ( FILL_LUSUP < 0 ) nzlumax = -FILL_LUSUP * annz;
else nzlumax = FILL_LUSUP; /* estimate an upper bound */
} else {
nzlumax = Glu->nzlumax; /* preset as static upper bound */
}
if ( lwork == -1 ) {
return (GluIntArray(n) * iword +
superlu_zTempSpace(n, panel_size, nprocs)
+ (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword);
} else {
pzgstrf_SetupSpace(work, lwork);
}
/* Integer pointers for L\U factors */
if ( whichspace == SYSTEM ) {
xsup = intMalloc(n+1);
xsup_end = intMalloc(n);
supno = intMalloc(n+1);
xlsub = intMalloc(n+1);
xlsub_end = intMalloc(n);
xlusup = intMalloc(n+1);
xlusup_end = intMalloc(n);
xusub = intMalloc(n+1);
xusub_end = intMalloc(n);
} else {
xsup = (int_t *)zuser_malloc((n+1) * iword, HEAD);
xsup_end = (int_t *)zuser_malloc((n) * iword, HEAD);
supno = (int_t *)zuser_malloc((n+1) * iword, HEAD);
xlsub = (int_t *)zuser_malloc((n+1) * iword, HEAD);
xlsub_end = (int_t *)zuser_malloc((n) * iword, HEAD);
xlusup = (int_t *)zuser_malloc((n+1) * iword, HEAD);
xlusup_end = (int_t *)zuser_malloc((n) * iword, HEAD);
xusub = (int_t *)zuser_malloc((n+1) * iword, HEAD);
xusub_end = (int_t *)zuser_malloc((n) * iword, HEAD);
}
lusup = (doublecomplex *) pzgstrf_expand( &nzlumax, LUSUP, 0, 0, Glu );
ucol = (doublecomplex *) pzgstrf_expand( &nzumax, UCOL, 0, 0, Glu );
lsub = (int_t *) pzgstrf_expand( &nzlmax, LSUB, 0, 0, Glu );
usub = (int_t *) pzgstrf_expand( &nzumax, USUB, 0, 1, Glu );
while ( !ucol || !lsub || !usub ) {
/*SUPERLU_ABORT("Not enough core in LUMemInit()");*/
#if (PRNTlevel==1)
printf(".. pzgstrf_MemInit(): #retries " IFMT "\n", ++retries);
#endif
if ( whichspace == SYSTEM ) {
SUPERLU_FREE(ucol);
SUPERLU_FREE(lsub);
SUPERLU_FREE(usub);
} else {
zuser_free(nzumax*dword+(nzlmax+nzumax)*iword, HEAD);
}
nzumax /= 2; /* reduce request */
nzlmax /= 2;
if ( nzumax < annz/2 ) {
printf("Not enough memory to perform factorization.\n");
return (pzgstrf_memory_use(nzlmax, nzumax, nzlumax) + n);
}
ucol = (doublecomplex *) pzgstrf_expand( &nzumax, UCOL, 0, 0, Glu );
lsub = (int_t *) pzgstrf_expand( &nzlmax, LSUB, 0, 0, Glu );
usub = (int_t *) pzgstrf_expand( &nzumax, USUB, 0, 1, Glu );
}
if ( !lusup ) {
float t = pzgstrf_memory_use(nzlmax, nzumax, nzlumax) + n;
printf("Not enough memory to perform factorization .. "
"need %.1f GBytes\n", t*1e-9);
fflush(stdout);
return (t);
}
} else { /* refact == YES */
Lstore = L->Store;
Ustore = U->Store;
xsup = Lstore->sup_to_colbeg;
xsup_end = Lstore->sup_to_colend;
supno = Lstore->col_to_sup;
xlsub = Lstore->rowind_colbeg;
xlsub_end= Lstore->rowind_colend;
xlusup = Lstore->nzval_colbeg;
xlusup_end= Lstore->nzval_colend;
xusub = Ustore->colbeg;
xusub_end= Ustore->colend;
nzlmax = Glu->nzlmax; /* max from previous factorization */
nzumax = Glu->nzumax;
nzlumax = Glu->nzlumax;
if ( lwork == -1 ) {
return (GluIntArray(n) * iword + superlu_zTempSpace(n, panel_size, nprocs)
+ (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword);
} else if ( lwork == 0 ) {
whichspace = SYSTEM;
} else {
whichspace = USER;
stack.size = lwork;
stack.top2 = lwork;
}
lsub = zexpanders[LSUB].mem = Lstore->rowind;
lusup = zexpanders[LUSUP].mem = Lstore->nzval;
usub = zexpanders[USUB].mem = Ustore->rowind;
ucol = zexpanders[UCOL].mem = Ustore->nzval;
zexpanders[LSUB].size = nzlmax;
zexpanders[LUSUP].size = nzlumax;
zexpanders[USUB].size = nzumax;
zexpanders[UCOL].size = nzumax;
}
Glu->xsup = xsup;
Glu->xsup_end = xsup_end;
Glu->supno = supno;
Glu->lsub = lsub;
Glu->xlsub = xlsub;
Glu->xlsub_end = xlsub_end;
Glu->lusup = lusup;
Glu->xlusup = xlusup;
Glu->xlusup_end = xlusup_end;
Glu->ucol = ucol;
Glu->usub = usub;
Glu->xusub = xusub;
Glu->xusub_end = xusub_end;
Glu->nzlmax = nzlmax;
Glu->nzumax = nzumax;
Glu->nzlumax = nzlumax;
++no_expand;
#if ( PRNTlevel>=1 )
printf(".. pzgstrf_MemInit() refact %d, whichspace %d, nzlumax " IFMT ", nzumax " IFMT ", nzlmax " IFMT "\n",
refact, whichspace, nzlumax, nzumax, nzlmax);
printf(".. pzgstrf_MemInit() FILL_LUSUP " IFMT ", FILL_UCOL " IFMT ", FILL_LSUB " IFMT "\n",
FILL_LUSUP, FILL_UCOL, FILL_LSUB);
fflush(stdout);
#endif
return 0;
} /* pzgstrf_MemInit */
/*
* Allocate known working storage. Returns 0 if success, otherwise
* returns the number of bytes allocated so far when failure occurred.
*/
int_t
pzgstrf_WorkInit(int_t n, int_t panel_size, int_t **iworkptr, doublecomplex **dworkptr)
{
int_t isize, dsize, extra;
doublecomplex *old_ptr;
int_t maxsuper = sp_ienv(3),
rowblk = sp_ienv(4);
isize = (2*panel_size + 5 + NO_MARKER) * n * sizeof(int_t);
dsize = (n * panel_size +
NUM_TEMPV(n,panel_size,maxsuper,rowblk)) * sizeof(doublecomplex);
if ( whichspace == SYSTEM )
*iworkptr = (int_t *) intCalloc(isize/sizeof(int_t));
else
*iworkptr = (int_t *) zuser_malloc(isize, TAIL);
if ( ! *iworkptr ) {
fprintf(stderr, "pzgstrf_WorkInit: malloc fails for local iworkptr[]\n");
return (isize + n);
}
if ( whichspace == SYSTEM )
*dworkptr = (doublecomplex *) SUPERLU_MALLOC((size_t) dsize);
else {
*dworkptr = (doublecomplex *) zuser_malloc(dsize, TAIL);
if ( NotDoubleAlign(*dworkptr) ) {
old_ptr = *dworkptr;
*dworkptr = (doublecomplex*) DoubleAlign(*dworkptr);
*dworkptr = (doublecomplex*) ((double*)*dworkptr - 1);
extra = (char*)old_ptr - (char*)*dworkptr;
#if ( DEBUGlevel>=1 )
printf("pzgstrf_WorkInit: not aligned, extra" IFMT "\n", extra);
#endif
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
stack.top2 -= extra;
stack.used += extra;
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
} /* else */
if ( ! *dworkptr ) {
printf("malloc fails for local dworkptr[] ... dsize " IFMT "\n", dsize);
return (isize + dsize + n);
}
return 0;
}
/*
* Set up pointers for real working arrays.
*/
void
pzgstrf_SetRWork(int_t n, int_t panel_size, doublecomplex *dworkptr,
doublecomplex **dense, doublecomplex **tempv)
{
doublecomplex zero = {0.0, 0.0};
int_t maxsuper = sp_ienv(3);
int_t rowblk = sp_ienv(4);
*dense = dworkptr;
*tempv = *dense + panel_size*n;
zfill (*dense, n * panel_size, zero);
zfill (*tempv, NUM_TEMPV(n,panel_size,maxsuper,rowblk), zero);
}
/*
* Free the working storage used by factor routines.
*/
void pzgstrf_WorkFree(int_t *iwork, doublecomplex *dwork, GlobalLU_t *Glu)
{
if ( whichspace == SYSTEM ) {
SUPERLU_FREE (iwork);
SUPERLU_FREE (dwork);
} else {
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
stack.used -= (stack.size - stack.top2);
stack.top2 = stack.size;
/* pzgstrf_StackCompress(Glu); */
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
}
/*
* Expand the data structures for L and U during the factorization.
* Return value: 0 - successful return
* > 0 - number of bytes allocated when run out of space
*
* @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
* !! Warning: Not Implemented in SuperLU_MT !!
* @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
*/
int_t
pzgstrf_MemXpand(
int_t jcol,
int_t next, /* number of elements currently in the factors */
MemType mem_type,/* which type of memory to expand */
int_t *maxlen, /* modified - max. length of a data structure */
GlobalLU_t *Glu /* modified - global LU data structures */
)
{
void *new_mem;
#ifdef CHK_EXPAND
printf("pzgstrf_MemXpand(): jcol " IFMT ", next " IFMT ", maxlen " IFMT ", MemType " IFMT "\n",
jcol, next, *maxlen, mem_type);
#endif
if (mem_type == USUB)
new_mem = pzgstrf_expand(maxlen, mem_type, next, 1, Glu);
else
new_mem = pzgstrf_expand(maxlen, mem_type, next, 0, Glu);
if ( !new_mem ) {
int_t nzlmax = Glu->nzlmax;
int_t nzumax = Glu->nzumax;
int_t nzlumax = Glu->nzlumax;
fprintf(stderr, "Can't expand MemType %d : jcol " IFMT "\n",
mem_type, jcol);
return (pzgstrf_memory_use(nzlmax, nzumax, nzlumax) + ndim);
}
switch ( mem_type ) {
case LUSUP:
Glu->lusup = (doublecomplex *) new_mem;
Glu->nzlumax = *maxlen;
break;
case UCOL:
Glu->ucol = (doublecomplex *) new_mem;
Glu->nzumax = *maxlen;
break;
case LSUB:
Glu->lsub = (int_t *) new_mem;
Glu->nzlmax = *maxlen;
break;
case USUB:
Glu->usub = (int_t *) new_mem;
Glu->nzumax = *maxlen;
break;
}
return 0;
}
void
copy_mem_doublecomplex(int_t howmany, void *old, void *new)
{
register int_t i;
doublecomplex *dold = old;
doublecomplex *dnew = new;
for (i = 0; i < howmany; i++) dnew[i] = dold[i];
}
/*
* Expand the existing storage to accommodate more fill-ins.
*/
void
*pzgstrf_expand(
int_t *prev_len, /* length used from previous call */
MemType type, /* which part of the memory to expand */
int_t len_to_copy, /* size of memory to be copied to new store */
int_t keep_prev, /* = 1: use prev_len;
= 0: compute new_len to expand */
GlobalLU_t *Glu /* modified - global LU data structures */
)
{
double alpha = EXPAND;
void *new_mem, *old_mem;
int_t new_len, tries, lword, extra, bytes_to_copy;
void *ret = NULL;
if ( no_expand == 0 || keep_prev ) /* First time allocate requested */
new_len = *prev_len;
else {
new_len = alpha * *prev_len;
}
if ( type == LSUB || type == USUB ) lword = sizeof(int_t);
else lword = sizeof(doublecomplex);
if ( whichspace == SYSTEM ) {
new_mem = (void *) SUPERLU_MALLOC( (size_t) new_len * lword );
if ( no_expand != 0 ) {
tries = 0;
if ( keep_prev ) {
if ( !new_mem ) return (NULL);
} else {
while ( !new_mem ) {
if ( ++tries > 10 ) return (NULL);
alpha = Reduce(alpha);
new_len = alpha * *prev_len;
new_mem = (void *) SUPERLU_MALLOC((size_t) new_len * lword);
}
}
if ( type == LSUB || type == USUB ) {
copy_mem_int(len_to_copy, zexpanders[type].mem, new_mem);
} else {
copy_mem_doublecomplex(len_to_copy, zexpanders[type].mem, new_mem);
}
SUPERLU_FREE (zexpanders[type].mem);
}
zexpanders[type].mem = (void *) new_mem;
} else { /* whichspace == USER */
if ( no_expand == 0 ) {
new_mem = zuser_malloc(new_len * lword, HEAD);
if ( NotDoubleAlign(new_mem) &&
(type == LUSUP || type == UCOL) ) {
old_mem = new_mem;
new_mem = (void *)DoubleAlign(new_mem);
extra = (char*)new_mem - (char*)old_mem;
#ifdef CHK_EXPAND
printf("expand(): not aligned, extra " IFMT "\n", extra);
#endif
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
stack.top1 += extra;
stack.used += extra;
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
zexpanders[type].mem = (void *) new_mem;
} else {
tries = 0;
extra = (new_len - *prev_len) * lword;
if ( keep_prev ) {
if ( StackFull(extra) ) {
new_len = 0;
zexpanders[type].mem = NULL;
return NULL;
}
} else {
while ( StackFull(extra) ) {
if ( ++tries > 10 ) {
new_len = 0;
zexpanders[type].mem = NULL;
return NULL;
}
alpha = Reduce(alpha);
new_len = alpha * *prev_len;
extra = (new_len - *prev_len) * lword;
}
}
if ( type != USUB ) {
new_mem = (void*)((char*)zexpanders[type + 1].mem + extra);
bytes_to_copy = (char*)stack.array + stack.top1
- (char*)zexpanders[type + 1].mem;
user_bcopy(zexpanders[type+1].mem, new_mem, bytes_to_copy);
if ( type < USUB ) {
Glu->usub = zexpanders[USUB].mem =
(void*)((char*)zexpanders[USUB].mem + extra);
}
if ( type < LSUB ) {
Glu->lsub = zexpanders[LSUB].mem =
(void*)((char*)zexpanders[LSUB].mem + extra);
}
if ( type < UCOL ) {
Glu->ucol = zexpanders[UCOL].mem =
(void*)((char*)zexpanders[UCOL].mem + extra);
}
stack.top1 += extra;
stack.used += extra;
if ( type == UCOL ) {
stack.top1 += extra; /* Add same amount for USUB */
stack.used += extra;
}
} /* if ... */
} /* else ... */
} /* else, whichspace == USER */
#ifdef DEBUG
printf("pzgstrf_expand[type " IFMT "]\n", type);
#endif
zexpanders[type].size = new_len;
*prev_len = new_len;
if ( no_expand ) ++no_expand;
return (void *) zexpanders[type].mem;
} /* expand */
/*
* Compress the work[] array to remove fragmentation.
*/
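/*
 * Layout note (inferred from the copies below): the HEAD end of the user
 * stack is repacked as [lusup | ucol | lsub | usub], each section placed
 * immediately after the previous one; top1 and used shrink by the
 * reclaimed fragment.
 */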
void
pzgstrf_StackCompress(GlobalLU_t *Glu)
{
register int_t iword, dword;
char *last, *fragment;
int_t *ifrom, *ito;
doublecomplex *dfrom, *dto;
int_t *xlsub, *lsub, *xusub_end, *usub, *xlusup;
doublecomplex *ucol, *lusup;
iword = sizeof(int_t);
dword = sizeof(doublecomplex);
xlsub = Glu->xlsub;
lsub = Glu->lsub;
xusub_end = Glu->xusub_end;
usub = Glu->usub;
xlusup = Glu->xlusup;
ucol = Glu->ucol;
lusup = Glu->lusup;
dfrom = ucol;
dto = (doublecomplex *)((char*)lusup + xlusup[ndim] * dword);
copy_mem_doublecomplex(xusub_end[ndim-1], dfrom, dto);
ucol = dto;
ifrom = lsub;
ito = (int_t *) ((char*)ucol + xusub_end[ndim-1] * iword);
copy_mem_int(xlsub[ndim], ifrom, ito);
lsub = ito;
ifrom = usub;
ito = (int_t *) ((char*)lsub + xlsub[ndim] * iword);
copy_mem_int(xusub_end[ndim-1], ifrom, ito);
usub = ito;
last = (char*)usub + xusub_end[ndim-1] * iword;
fragment = (char*) ((char*)stack.array + stack.top1 - last);
stack.used -= (long long int) fragment;
stack.top1 -= (long long int) fragment;
Glu->ucol = ucol;
Glu->lsub = lsub;
Glu->usub = usub;
#ifdef CHK_EXPAND
printf("pzgstrf_StackCompress: fragment " IFMT "\n", fragment);
/* PrintStack("After compress", Glu);
for (last = 0; last < ndim; ++last)
print_lu_col("After compress:", last, 0);*/
#endif
}
/*
* Allocate storage for original matrix A
*/
void
zallocateA(int_t n, int_t nnz, doublecomplex **a, int_t **asub, int_t **xa)
{
*a = (doublecomplex *) doublecomplexMalloc(nnz);
*asub = (int_t *) intMalloc(nnz);
*xa = (int_t *) intMalloc(n+1);
}
doublecomplex *doublecomplexMalloc(int_t n)
{
doublecomplex *buf;
buf = (doublecomplex *) SUPERLU_MALLOC( (size_t) n * sizeof(doublecomplex) );
if ( !buf ) {
fprintf(stderr, "SUPERLU_MALLOC failed for buf in doublecomplexMalloc()");
exit (1);
}
return (buf);
}
doublecomplex *doublecomplexCalloc(int_t n)
{
doublecomplex *buf;
register int_t i;
doublecomplex zero = {0.0, 0.0};
buf = (doublecomplex *) SUPERLU_MALLOC( (size_t) n * sizeof(doublecomplex) );
if ( !buf ) {
fprintf(stderr, "SUPERLU_MALLOC failed for buf in doublecomplexCalloc()");
exit (1);
}
for (i = 0; i < n; ++i) buf[i] = zero;
return (buf);
}
/*
* Set up memory image in lusup[*], using the supernode boundaries in
* the Householder matrix.
*
* In both static and dynamic scheme, the relaxed supernodes (leaves)
* are stored in the beginning of lusup[*]. In the static scheme, the
* memory is also set aside for the internal supernodes using upper
* bound information from H. In the dynamic scheme, however, the memory
* for the internal supernodes is not allocated by this routine.
*
* Return value
* o Static scheme: number of nonzeros of all the supernodes in H.
* o Dynamic scheme: number of nonzeros of the relaxed supernodes.
*/
int_t
zPresetMap(
const int_t n,
SuperMatrix *A, /* original matrix permuted by columns */
pxgstrf_relax_t *pxgstrf_relax, /* relaxed supernodes */
superlumt_options_t *superlumt_options, /* input */
GlobalLU_t *Glu /* modified */
)
{
register int_t i, j, k, w, rs, rs_lastcol, krow, kmark, maxsup, nextpos;
register int_t rs_nrow; /* number of nonzero rows in a relaxed supernode */
int_t *marker, *asub, *xa_begin, *xa_end;
NCPformat *Astore;
int_t *map_in_sup; /* memory mapping function; values irrelevant on entry. */
int_t *colcnt; /* column count of Lc or H */
int_t *super_bnd; /* supernodes partition in H */
char *snode_env, *getenv();
snode_env = getenv("SuperLU_DYNAMIC_SNODE_STORE");
if ( snode_env != NULL ) {
Glu->dynamic_snode_bound = YES;
#if ( PRNTlevel>=1 )
printf(".. Use dynamic alg. to allocate storage for L supernodes.\n");
#endif
} else Glu->dynamic_snode_bound = NO;
Astore = A->Store;
asub = Astore->rowind;
xa_begin = Astore->colbeg;
xa_end = Astore->colend;
rs = 1;
marker = intMalloc(n);
ifill(marker, n, EMPTY);
map_in_sup = Glu->map_in_sup = intCalloc(n+1);
colcnt = superlumt_options->colcnt_h;
super_bnd = superlumt_options->part_super_h;
nextpos = 0;
/* Split large supernode into smaller pieces */
maxsup = sp_ienv(3);
for (j = 0; j < n; ) {
w = super_bnd[j];
k = j + w;
if ( w > maxsup ) {
w = w % maxsup;
if ( w == 0 ) w = maxsup;
while ( j < k ) {
super_bnd[j] = w;
j += w;
w = maxsup;
}
}
j = k;
}
for (j = 0; j < n; j += w) {
if ( Glu->dynamic_snode_bound == NO ) map_in_sup[j] = nextpos;
if ( pxgstrf_relax[rs].fcol == j ) {
/* Column j starts a relaxed supernode. */
map_in_sup[j] = nextpos;
rs_nrow = 0;
w = pxgstrf_relax[rs++].size;
rs_lastcol = j + w;
for (i = j; i < rs_lastcol; ++i) {
/* for each nonzero in A[*,i] */
for (k = xa_begin[i]; k < xa_end[i]; k++) {
krow = asub[k];
kmark = marker[krow];
if ( kmark != j ) { /* first time visit krow */
marker[krow] = j;
++rs_nrow;
}
}
}
nextpos += w * rs_nrow;
/* Find the next H-supernode, with leading column i, which is
outside the relaxed supernode, rs. */
for (i = j; i < rs_lastcol; k = i, i += super_bnd[i]);
if ( i > rs_lastcol ) {
/* The w columns [rs_lastcol, i) may join in the
preceding relaxed supernode; make sure we leave
enough room for the combined supernode. */
w = i - rs_lastcol;
nextpos += w * SUPERLU_MAX( rs_nrow, colcnt[k] );
}
w = i - j;
} else { /* Column j starts a supernode in H */
w = super_bnd[j];
if ( Glu->dynamic_snode_bound == NO ) nextpos += w * colcnt[j];
}
/* Set up the offset (negative) to the leading column j of a
supernode in H */
for (i = 1; i < w; ++i) map_in_sup[j + i] = -i;
} /* for j ... */
if ( Glu->dynamic_snode_bound == YES ) Glu->nextlu = nextpos;
else map_in_sup[n] = nextpos;
#if ( PRNTlevel>=1 )
printf("** PresetMap() allocates " IFMT " reals to lusup[*]....\n", nextpos);
#endif
free (marker);
return nextpos;
}
|
ICP.h | ///////////////////////////////////////////////////////////////////////////////
/// "Sparse Iterative Closest Point"
/// by Sofien Bouaziz, Andrea Tagliasacchi, Mark Pauly
/// Copyright (C) 2013 LGG, EPFL
///////////////////////////////////////////////////////////////////////////////
/// 1) This file contains different implementations of the ICP algorithm.
/// 2) This code requires EIGEN and NANOFLANN.
/// 3) If OPENMP is activated some part of the code will be parallelized.
/// 4) This code is for now designed for 3D registration
/// 5) Two main input types are Eigen::Matrix3Xd or Eigen::Map<Eigen::Matrix3Xd>
///////////////////////////////////////////////////////////////////////////////
/// namespace nanoflann: NANOFLANN KD-tree adaptor for EIGEN
/// namespace RigidMotionEstimator: functions to compute the rigid motion
/// namespace SICP: sparse ICP implementation
/// namespace ICP: reweighted ICP implementation
///////////////////////////////////////////////////////////////////////////////
#ifndef ICP_H
#define ICP_H
#include "nanoflann.hpp"
#include <Eigen/Dense>
///////////////////////////////////////////////////////////////////////////////
namespace nanoflann {
/// KD-tree adaptor for working with data directly stored in an Eigen Matrix, without duplicating the data storage.
/// This code is adapted from the KDTreeEigenMatrixAdaptor class of nanoflann.hpp
template <class MatrixType, int DIM = -1, class Distance = nanoflann::metric_L2, typename IndexType = int>
struct KDTreeAdaptor {
typedef KDTreeAdaptor<MatrixType,DIM,Distance> self_t;
typedef typename MatrixType::Scalar num_t;
typedef typename Distance::template traits<num_t,self_t>::distance_t metric_t;
typedef KDTreeSingleIndexAdaptor< metric_t,self_t,DIM,IndexType> index_t;
index_t* index;
KDTreeAdaptor(const MatrixType &mat, const int leaf_max_size = 10) : m_data_matrix(mat) {
const size_t dims = mat.rows();
index = new index_t( dims, *this, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size, dims ) );
index->buildIndex();
}
~KDTreeAdaptor() {delete index;}
const MatrixType &m_data_matrix;
/// Query for the num_closest closest points to a given point (entered as query_point[0:dim-1]).
inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq) const {
nanoflann::KNNResultSet<typename MatrixType::Scalar,IndexType> resultSet(num_closest);
resultSet.init(out_indices, out_distances_sq);
index->findNeighbors(resultSet, query_point, nanoflann::SearchParams());
}
/// Query for the closest points to a given point (entered as query_point[0:dim-1]).
inline IndexType closest(const num_t *query_point) const {
IndexType out_indices;
num_t out_distances_sq;
query(query_point, 1, &out_indices, &out_distances_sq);
return out_indices;
}
const self_t & derived() const {return *this;}
self_t & derived() {return *this;}
inline size_t kdtree_get_point_count() const {return m_data_matrix.cols();}
/// Returns the distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class:
inline num_t kdtree_distance(const num_t *p1, const size_t idx_p2,size_t size) const {
num_t s=0;
for (size_t i=0; i<size; i++) {
const num_t d= p1[i]-m_data_matrix.coeff(i,idx_p2);
s+=d*d;
}
return s;
}
/// Returns the dim'th component of the idx'th point in the class:
inline num_t kdtree_get_pt(const size_t idx, int dim) const {
return m_data_matrix.coeff(dim,idx);
}
/// Optional bounding-box computation: return false to default to a standard bbox computation loop.
template <class BBOX> bool kdtree_get_bbox(BBOX&) const {return false;}
};
}
///////////////////////////////////////////////////////////////////////////////
/// Compute the rigid motion for point-to-point and point-to-plane distances
namespace RigidMotionEstimator {
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Confidence weights
template <typename Derived1, typename Derived2, typename Derived3>
Eigen::Affine3d point_to_point(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Y,
const Eigen::MatrixBase<Derived3>& w) {
/// Normalize weight vector
Eigen::VectorXd w_normalized = w/w.sum();
/// De-mean
Eigen::Vector3d X_mean, Y_mean;
for(int i=0; i<3; ++i) {
X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum();
Y_mean(i) = (Y.row(i).array()*w_normalized.transpose().array()).sum();
}
X.colwise() -= X_mean;
Y.colwise() -= Y_mean;
/// Compute transformation
Eigen::Affine3d transformation;
Eigen::Matrix3d sigma = X * w_normalized.asDiagonal() * Y.transpose();
Eigen::JacobiSVD<Eigen::Matrix3d> svd(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV);
if(svd.matrixU().determinant()*svd.matrixV().determinant() < 0.0) {
Eigen::Vector3d S = Eigen::Vector3d::Ones(); S(2) = -1.0;
transformation.linear().noalias() = svd.matrixV()*S.asDiagonal()*svd.matrixU().transpose();
} else {
transformation.linear().noalias() = svd.matrixV()*svd.matrixU().transpose();
}
transformation.translation().noalias() = Y_mean - transformation.linear()*X_mean;
/// Apply transformation
X = transformation*X;
/// Re-apply mean
X.colwise() += X_mean;
Y.colwise() += Y_mean;
/// Return transformation
return transformation;
}
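/// Usage sketch (illustrative): X and Y hold one 3D point per column; the
/// call modifies X in place and returns the rigid transform (weighted SVD
/// alignment with a reflection guard) that maps X onto Y:
///   Eigen::VectorXd w = Eigen::VectorXd::Ones(X.cols());
///   Eigen::Affine3d T = RigidMotionEstimator::point_to_point(X, Y, w);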
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Confidence weights
template <typename Derived1, typename Derived2, typename Derived3>
Eigen::Affine3d point_to_point_about_center(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Y,
const Eigen::MatrixBase<Derived3>& w, const Eigen::Vector3d & center) {
/// Normalize weight vector
Eigen::VectorXd w_normalized = w/w.sum();
/// De-mean
Eigen::Vector3d X_mean, Y_mean;
X_mean=center;
Y_mean=center;
X.colwise() -= X_mean;
Y.colwise() -= Y_mean;
/// Compute transformation
Eigen::Affine3d transformation;
Eigen::Matrix3d sigma = X * w_normalized.asDiagonal() * Y.transpose();
Eigen::JacobiSVD<Eigen::Matrix3d> svd(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV);
if(svd.matrixU().determinant()*svd.matrixV().determinant() < 0.0) {
Eigen::Vector3d S = Eigen::Vector3d::Ones(); S(2) = -1.0;
transformation.linear().noalias() = svd.matrixV()*S.asDiagonal()*svd.matrixU().transpose();
} else {
transformation.linear().noalias() = svd.matrixV()*svd.matrixU().transpose();
}
transformation.translation().noalias() = Y_mean - transformation.linear()*X_mean;
/// Apply transformation
X = transformation*X;
// T*R*T^-1*X =
// (I T)*(R 0)*(I -T)*X
// = (R T)*(I -T)*X
// = (R -RT)*X
/// Re-apply mean
X.colwise() += X_mean;
Y.colwise() += Y_mean;
/// Return transformation
return transformation;
}
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
template <typename Derived1, typename Derived2>
inline Eigen::Affine3d point_to_point(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Y) {
return point_to_point(X, Y, Eigen::VectorXd::Ones(X.cols()));
}
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Confidence weights
/// @param Right hand side
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4, typename Derived5>
Eigen::Affine3d point_to_plane(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Y,
Eigen::MatrixBase<Derived3>& N,
const Eigen::MatrixBase<Derived4>& w,
const Eigen::MatrixBase<Derived5>& u) {
typedef Eigen::Matrix<double, 6, 6> Matrix66;
typedef Eigen::Matrix<double, 6, 1> Vector6;
typedef Eigen::Block<Matrix66, 3, 3> Block33;
/// Normalize weight vector
Eigen::VectorXd w_normalized = w/w.sum();
/// De-mean
Eigen::Vector3d X_mean;
for(int i=0; i<3; ++i)
X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum();
X.colwise() -= X_mean;
Y.colwise() -= X_mean;
/// Prepare LHS and RHS
Matrix66 LHS = Matrix66::Zero();
Vector6 RHS = Vector6::Zero();
Block33 TL = LHS.topLeftCorner<3,3>();
Block33 TR = LHS.topRightCorner<3,3>();
Block33 BR = LHS.bottomRightCorner<3,3>();
Eigen::MatrixXd C = Eigen::MatrixXd::Zero(3,X.cols());
#pragma omp parallel
{
#pragma omp for
for(int i=0; i<X.cols(); i++) {
C.col(i) = X.col(i).cross(N.col(i));
}
#pragma omp sections nowait
{
#pragma omp section
for(int i=0; i<X.cols(); i++) TL.selfadjointView<Eigen::Upper>().rankUpdate(C.col(i), w(i));
#pragma omp section
for(int i=0; i<X.cols(); i++) TR += (C.col(i)*N.col(i).transpose())*w(i);
#pragma omp section
for(int i=0; i<X.cols(); i++) BR.selfadjointView<Eigen::Upper>().rankUpdate(N.col(i), w(i));
#pragma omp section
for(int i=0; i<C.cols(); i++) {
double dist_to_plane = -((X.col(i) - Y.col(i)).dot(N.col(i)) - u(i))*w(i);
RHS.head<3>() += C.col(i)*dist_to_plane;
RHS.tail<3>() += N.col(i)*dist_to_plane;
}
}
}
LHS = LHS.selfadjointView<Eigen::Upper>();
/// Compute transformation
Eigen::Affine3d transformation;
Eigen::LDLT<Matrix66> ldlt(LHS);
RHS = ldlt.solve(RHS);
transformation = Eigen::AngleAxisd(RHS(0), Eigen::Vector3d::UnitX()) *
Eigen::AngleAxisd(RHS(1), Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(RHS(2), Eigen::Vector3d::UnitZ());
transformation.translation() = RHS.tail<3>();
/// Apply transformation
X = transformation*X;
/// Re-apply mean
X.colwise() += X_mean;
Y.colwise() += X_mean;
/// Return transformation
return transformation;
}
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Confidence weights
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4>
inline Eigen::Affine3d point_to_plane(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Yp,
Eigen::MatrixBase<Derived3>& Yn,
const Eigen::MatrixBase<Derived4>& w) {
return point_to_plane(X, Yp, Yn, w, Eigen::VectorXd::Zero(X.cols()));
}
}
///////////////////////////////////////////////////////////////////////////////
/// ICP implementation using ADMM/ALM/Penalty method
namespace SICP {
struct Parameters {
//bool use_penalty = false; /// if use_penalty then penalty method else ADMM or ALM (see max_inner)
//double p = 1.0; /// p norm
//double mu = 10.0; /// penalty weight
//double alpha = 1.2; /// penalty increase factor
//double max_mu = 1e5; /// max penalty
//int max_icp = 100; /// max ICP iteration
//int max_outer = 100; /// max outer iteration
//int max_inner = 1; /// max inner iteration. If max_inner=1 then ADMM else ALM
//double stop = 1e-5; /// stopping criteria
//bool print_icpn = false; /// (debug) print ICP iteration
bool use_penalty;
double p;
double mu;
double alpha;
double max_mu;
int max_icp;
int max_outer;
int max_inner;
double stop;
bool print_icpn;
Parameters()
{
use_penalty = false;
p = 1.0;
mu = 10.0;
alpha = 1.2;
max_mu = 1e5;
max_icp = 100;
max_outer = 100;
max_inner = 1;
stop = 1e-5;
print_icpn = false;
}
};
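/// Usage sketch (illustrative): register a scan X (3xN, one point per
/// column) onto a target Y with the default ADMM settings and a
/// sparsity-inducing norm, e.g. p = 0.4:
///   SICP::Parameters par;
///   par.p = 0.4;
///   SICP::point_to_point(X, Y, par); // X is moved in place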
/// Shrinkage operator (Automatic loop unrolling using template)
template<unsigned int I>
inline double shrinkage(double mu, double n, double p, double s) {
return shrinkage<I-1>(mu, n, p, 1.0 - (p/mu)*std::pow(n, p-2.0)*std::pow(s, p-1.0));
}
template<>
inline double shrinkage<0>(double, double, double, double s) {return s;}
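/// The recursion above performs I fixed-point refinements of the p-norm
/// proximal (shrinkage) step used by Sparse ICP; shrinkage<0> terminates
/// the unrolling and returns the current iterate s.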
/// 3D Shrinkage for point-to-point
template<unsigned int I>
inline void shrink(Eigen::Matrix3Xd& Q, double mu, double p) {
double Ba = std::pow((2.0/mu)*(1.0-p), 1.0/(2.0-p));
double ha = Ba + (p/mu)*std::pow(Ba, p-1.0);
#pragma omp parallel for
for(int i=0; i<Q.cols(); ++i) {
double n = Q.col(i).norm();
double w = 0.0;
if(n > ha) w = shrinkage<I>(mu, n, p, (Ba/n + 1.0)/2.0);
Q.col(i) *= w;
}
}
/// 1D Shrinkage for point-to-plane
template<unsigned int I>
inline void shrink(Eigen::VectorXd& y, double mu, double p) {
double Ba = std::pow((2.0/mu)*(1.0-p), 1.0/(2.0-p));
double ha = Ba + (p/mu)*std::pow(Ba, p-1.0);
#pragma omp parallel for
for(int i=0; i<y.rows(); ++i) {
double n = std::abs(y(i));
double s = 0.0;
if(n > ha) s = shrinkage<I>(mu, n, p, (Ba/n + 1.0)/2.0);
y(i) *= s;
}
}
/// Sparse ICP with point to point
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Parameters
template <typename Derived1, typename Derived2>
void point_to_point(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Y,
Parameters par = Parameters()) {
/// Build kd-tree
nanoflann::KDTreeAdaptor<Eigen::MatrixBase<Derived2>, 3, nanoflann::metric_L2_Simple> kdtree(Y);
/// Buffers
Eigen::Matrix3Xd Q = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::Matrix3Xd Z = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::Matrix3Xd C = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::Matrix3Xd Xo1 = X;
Eigen::Matrix3Xd Xo2 = X;
/// ICP
for(int icp=0; icp<par.max_icp; ++icp) {
if(par.print_icpn) std::cout << "Iteration #" << icp << "/" << par.max_icp << std::endl;
/// Find closest point
#pragma omp parallel for
for(int i=0; i<X.cols(); ++i) {
Q.col(i) = Y.col(kdtree.closest(X.col(i).data()));
}
/// Compute rotation and translation
double mu = par.mu;
for(int outer=0; outer<par.max_outer; ++outer) {
double dual = 0.0;
for(int inner=0; inner<par.max_inner; ++inner) {
/// Z update (shrinkage)
Z = X-Q+C/mu;
shrink<3>(Z, mu, par.p);
/// Rotation and translation update
Eigen::Matrix3Xd U = Q+Z-C/mu;
RigidMotionEstimator::point_to_point(X, U);
/// Stopping criteria
dual = (X-Xo1).colwise().norm().maxCoeff();
Xo1 = X;
if(dual < par.stop) break;
}
/// C update (lagrange multipliers)
Eigen::Matrix3Xd P = X-Q-Z;
if(!par.use_penalty) C.noalias() += mu*P;
/// mu update (penalty)
if(mu < par.max_mu) mu *= par.alpha;
/// Stopping criteria
double primal = P.colwise().norm().maxCoeff();
if(primal < par.stop && dual < par.stop) break;
}
/// Stopping criteria
double stop = (X-Xo2).colwise().norm().maxCoeff();
Xo2 = X;
if(stop < par.stop) break;
}
}
/// Sparse ICP with point to plane
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Parameters
template <typename Derived1, typename Derived2, typename Derived3>
void point_to_plane(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Y,
Eigen::MatrixBase<Derived3>& N,
Parameters par = Parameters()) {
/// Build kd-tree
nanoflann::KDTreeAdaptor<Eigen::MatrixBase<Derived2>, 3, nanoflann::metric_L2_Simple> kdtree(Y);
/// Buffers
Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::VectorXd Z = Eigen::VectorXd::Zero(X.cols());
Eigen::VectorXd C = Eigen::VectorXd::Zero(X.cols());
Eigen::Matrix3Xd Xo1 = X;
Eigen::Matrix3Xd Xo2 = X;
/// ICP
for(int icp=0; icp<par.max_icp; ++icp) {
if(par.print_icpn) std::cout << "Iteration #" << icp << "/" << par.max_icp << std::endl;
/// Find closest point
#pragma omp parallel for
for(int i=0; i<X.cols(); ++i) {
int id = kdtree.closest(X.col(i).data());
Qp.col(i) = Y.col(id);
Qn.col(i) = N.col(id);
}
/// Compute rotation and translation
double mu = par.mu;
for(int outer=0; outer<par.max_outer; ++outer) {
double dual = 0.0;
for(int inner=0; inner<par.max_inner; ++inner) {
/// Z update (shrinkage)
Z = (Qn.array()*(X-Qp).array()).colwise().sum().transpose()+C.array()/mu;
shrink<3>(Z, mu, par.p);
/// Rotation and translation update
Eigen::VectorXd U = Z-C/mu;
RigidMotionEstimator::point_to_plane(X, Qp, Qn, Eigen::VectorXd::Ones(X.cols()), U);
/// Stopping criteria
dual = (X-Xo1).colwise().norm().maxCoeff();
Xo1 = X;
if(dual < par.stop) break;
}
/// C update (lagrange multipliers)
Eigen::VectorXd P = (Qn.array()*(X-Qp).array()).colwise().sum().transpose()-Z.array();
if(!par.use_penalty) C.noalias() += mu*P;
/// mu update (penalty)
if(mu < par.max_mu) mu *= par.alpha;
/// Stopping criteria
double primal = P.array().abs().maxCoeff();
if(primal < par.stop && dual < par.stop) break;
}
/// Stopping criteria
double stop = (X-Xo2).colwise().norm().maxCoeff();
Xo2 = X;
if(stop < par.stop) break;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// ICP implementation using iterative reweighting
namespace ICP {
enum Function {
PNORM,
TUKEY,
FAIR,
LOGISTIC,
TRIMMED,
NONE
};
class Parameters {
public:
Parameters() : f(NONE),
p(0.1),
max_icp(100),
max_outer(100),
stop(1e-5) {}
/// Parameters
Function f; /// robust function type
double p; /// parameter of the robust function
int max_icp; /// max ICP iteration
int max_outer; /// max outer iteration
double stop; /// stopping criteria
};
/// Weight functions
/// @param Residuals
/// @param Parameter
void uniform_weight(Eigen::VectorXd& r) {
r = Eigen::VectorXd::Ones(r.rows());
}
/// @param Residuals
/// @param Parameter
void pnorm_weight(Eigen::VectorXd& r, double p, double reg=1e-8) {
for(int i=0; i<r.rows(); ++i) {
r(i) = p/(std::pow(r(i),2-p) + reg);
}
}
/// @param Residuals
/// @param Parameter
void tukey_weight(Eigen::VectorXd& r, double p) {
for(int i=0; i<r.rows(); ++i) {
if(r(i) > p) r(i) = 0.0;
else r(i) = std::pow((1.0 - std::pow(r(i)/p,2.0)), 2.0);
}
}
/// @param Residuals
/// @param Parameter
void fair_weight(Eigen::VectorXd& r, double p) {
for(int i=0; i<r.rows(); ++i) {
r(i) = 1.0/(1.0 + r(i)/p);
}
}
/// @param Residuals
/// @param Parameter
void logistic_weight(Eigen::VectorXd& r, double p) {
for(int i=0; i<r.rows(); ++i) {
r(i) = (p/r(i))*std::tanh(r(i)/p);
}
}
struct sort_pred {
bool operator()(const std::pair<int,double> &left,
const std::pair<int,double> &right) {
return left.second < right.second;
}
};
/// @param Residuals
/// @param Parameter
void trimmed_weight(Eigen::VectorXd& r, double p) {
std::vector<std::pair<int, double> > sortedDist(r.rows());
for(int i=0; i<r.rows(); ++i) {
sortedDist[i] = std::pair<int, double>(i,r(i));
}
std::sort(sortedDist.begin(), sortedDist.end(), sort_pred());
r.setZero();
int nbV = r.rows()*p;
for(int i=0; i<nbV; ++i) {
r(sortedDist[i].first) = 1.0;
}
}
/// @param Function type
/// @param Residuals
/// @param Parameter
void robust_weight(Function f, Eigen::VectorXd& r, double p) {
switch(f) {
case PNORM: pnorm_weight(r,p); break;
case TUKEY: tukey_weight(r,p); break;
case FAIR: fair_weight(r,p); break;
case LOGISTIC: logistic_weight(r,p); break;
case TRIMMED: trimmed_weight(r,p); break;
case NONE: uniform_weight(r); break;
default: uniform_weight(r); break;
}
}
/// Reweighted ICP with point to point
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Parameters
void point_to_point(Eigen::Matrix3Xd& X,
Eigen::Matrix3Xd& Y,
Parameters par = Parameters()) {
/// Build kd-tree
nanoflann::KDTreeAdaptor<Eigen::Matrix3Xd, 3, nanoflann::metric_L2_Simple> kdtree(Y);
/// Buffers
Eigen::Matrix3Xd Q = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols());
Eigen::Matrix3Xd Xo1 = X;
Eigen::Matrix3Xd Xo2 = X;
/// ICP
for(int icp=0; icp<par.max_icp; ++icp) {
/// Find closest point
#pragma omp parallel for
for(int i=0; i<X.cols(); ++i) {
Q.col(i) = Y.col(kdtree.closest(X.col(i).data()));
}
/// Compute rotation and translation
for(int outer=0; outer<par.max_outer; ++outer) {
/// Compute weights
W = (X-Q).colwise().norm();
robust_weight(par.f, W, par.p);
/// Rotation and translation update
RigidMotionEstimator::point_to_point(X, Q, W);
/// Stopping criteria
double stop1 = (X-Xo1).colwise().norm().maxCoeff();
Xo1 = X;
if(stop1 < par.stop) break;
}
/// Stopping criteria
double stop2 = (X-Xo2).colwise().norm().maxCoeff();
Xo2 = X;
if(stop2 < par.stop) break;
}
}
/// Reweighted ICP with point to plane
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Parameters
template <typename Derived1, typename Derived2, typename Derived3>
void point_to_plane(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Y,
Eigen::MatrixBase<Derived3>& N,
Parameters par = Parameters()) {
/// Build kd-tree
nanoflann::KDTreeAdaptor<Eigen::MatrixBase<Derived2>, 3, nanoflann::metric_L2_Simple> kdtree(Y);
/// Buffers
Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols());
Eigen::Matrix3Xd Xo1 = X;
Eigen::Matrix3Xd Xo2 = X;
/// ICP
for(int icp=0; icp<par.max_icp; ++icp) {
/// Find closest point
#pragma omp parallel for
for(int i=0; i<X.cols(); ++i) {
int id = kdtree.closest(X.col(i).data());
Qp.col(i) = Y.col(id);
Qn.col(i) = N.col(id);
}
/// Compute rotation and translation
for(int outer=0; outer<par.max_outer; ++outer) {
/// Compute weights
W = (Qn.array()*(X-Qp).array()).colwise().sum().abs().transpose();
robust_weight(par.f, W, par.p);
/// Rotation and translation update
RigidMotionEstimator::point_to_plane(X, Qp, Qn, W);
/// Stopping criteria
double stop1 = (X-Xo1).colwise().norm().maxCoeff();
Xo1 = X;
if(stop1 < par.stop) break;
}
/// Stopping criteria
double stop2 = (X-Xo2).colwise().norm().maxCoeff();
Xo2 = X;
if(stop2 < par.stop) break;
}
}
}
///////////////////////////////////////////////////////////////////////////////
#endif
|
jacobi2d-tile-no.c | /**
* jacobi-2d-imper.c: This file is part of the PolyBench/C 3.2 test suite.
* Jacobi with array copying, no reduction; with tiling and nested SIMD.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double; this variant is specialized for 500x500 arrays. */
#include "jacobi-2d-imper.h"
/* Array initialization. */
static void init_array(int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int i;
//int j;
{
int c1;
int c2;
int c4;
int c3;
if (n >= 1) {
#pragma omp parallel for private(c3, c4, c2)
for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {
A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;
B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;
}
}
}
}
}
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static void print_array(int n,double A[500 + 0][500 + 0])
{
int i;
int j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf(stderr,"%0.2lf ",A[i][j]);
if ((i * n + j) % 20 == 0)
fprintf(stderr,"\n");
}
fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void kernel_jacobi_2d_imper(int tsteps,int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int t;
//int i;
//int j;
//#pragma scop
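/* The tiled, macro-expanded nest below is auto-generated from the
   canonical Jacobi-2D sweep (reference form assumed from PolyBench's
   jacobi-2d-imper kernel):

       for (t = 0; t < tsteps; t++) {
         for (i = 1; i < n - 1; i++)
           for (j = 1; j < n - 1; j++)
             B[i][j] = 0.2 * (A[i][j] + A[i][j-1] + A[i][j+1]
                              + A[i+1][j] + A[i-1][j]);
         for (i = 1; i < n - 1; i++)
           for (j = 1; j < n - 1; j++)
             A[i][j] = B[i][j];
       }
*/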
{
int c0;
int c1;
int c3;
int c2;
int c4;
int c5;
if (n >= 3 && tsteps >= 1) {
for (c0 = 0; c0 <= (((n + 3 * tsteps + -4) * 16 < 0?((16 < 0?-((-(n + 3 * tsteps + -4) + 16 + 1) / 16) : -((-(n + 3 * tsteps + -4) + 16 - 1) / 16))) : (n + 3 * tsteps + -4) / 16)); c0++) {
#pragma omp parallel for private(c5, c4, c2, c3)
for (c1 = (((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) > (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))?((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) : (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))); c1 <= (((((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) < c0?(((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) : c0)); c1++) {
for (c2 = ((((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) > 2 * c0 + -2 * c1?(((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) : 2 * c0 + -2 * c1); c2 <= (((((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) < (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16))?(((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) : (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16)))); c2++) {
if (c0 <= (((32 * c1 + 16 * c2 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 + 1) / 32) : -((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 - 1) / 32))) : (32 * c1 + 16 * c2 + -1 * n + 1) / 32)) && c1 <= c2 + -1) {
if ((n + 1) % 2 == 0) {
for (c4 = (16 * c1 > 16 * c2 + -1 * n + 3?16 * c1 : 16 * c2 + -1 * n + 3); c4 <= 16 * c1 + 15; c4++) {
A[-16 * c2 + c4 + n + -2][n + -2] = B[-16 * c2 + c4 + n + -2][n + -2];
}
}
}
if (c0 <= (((48 * c1 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(48 * c1 + -1 * n + 1) + 32 + 1) / 32) : -((-(48 * c1 + -1 * n + 1) + 32 - 1) / 32))) : (48 * c1 + -1 * n + 1) / 32)) && c1 >= c2) {
if ((n + 1) % 2 == 0) {
for (c5 = (16 * c2 > 16 * c1 + -1 * n + 3?16 * c2 : 16 * c1 + -1 * n + 3); c5 <= ((16 * c1 < 16 * c2 + 15?16 * c1 : 16 * c2 + 15)); c5++) {
A[n + -2][-16 * c1 + c5 + n + -2] = B[n + -2][-16 * c1 + c5 + n + -2];
}
}
}
for (c3 = ((((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) > 16 * c0 + -16 * c1?(((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) : 16 * c0 + -16 * c1); c3 <= ((((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) < 16 * c0 + -16 * c1 + 15?((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) : 16 * c0 + -16 * c1 + 15)); c3++) {
if (c1 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {
for (c5 = (16 * c2 > 2 * c3 + 1?16 * c2 : 2 * c3 + 1); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {
B[1][-2 * c3 + c5] = 0.2 * (A[1][-2 * c3 + c5] + A[1][-2 * c3 + c5 - 1] + A[1][1 + (-2 * c3 + c5)] + A[1 + 1][-2 * c3 + c5] + A[1 - 1][-2 * c3 + c5]);
}
}
for (c4 = (16 * c1 > 2 * c3 + 2?16 * c1 : 2 * c3 + 2); c4 <= ((16 * c1 + 15 < 2 * c3 + n + -2?16 * c1 + 15 : 2 * c3 + n + -2)); c4++) {
if (c2 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {
B[-2 * c3 + c4][1] = 0.2 * (A[-2 * c3 + c4][1] + A[-2 * c3 + c4][1 - 1] + A[-2 * c3 + c4][1 + 1] + A[1 + (-2 * c3 + c4)][1] + A[-2 * c3 + c4 - 1][1]);
}
for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {
B[-2 * c3 + c4][-2 * c3 + c5] = 0.2 * (A[-2 * c3 + c4][-2 * c3 + c5] + A[-2 * c3 + c4][-2 * c3 + c5 - 1] + A[-2 * c3 + c4][1 + (-2 * c3 + c5)] + A[1 + (-2 * c3 + c4)][-2 * c3 + c5] + A[-2 * c3 + c4 - 1][-2 * c3 + c5]);
A[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1] = B[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1];
}
if (c2 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {
A[-2 * c3 + c4 + -1][n + -2] = B[-2 * c3 + c4 + -1][n + -2];
}
}
if (c1 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {
for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -1?16 * c2 + 15 : 2 * c3 + n + -1)); c5++) {
A[n + -2][-2 * c3 + c5 + -1] = B[n + -2][-2 * c3 + c5 + -1];
}
}
}
if (c0 >= (((2 * c1 + c2 + -1) * 2 < 0?-(-(2 * c1 + c2 + -1) / 2) : ((2 < 0?(-(2 * c1 + c2 + -1) + - 2 - 1) / - 2 : (2 * c1 + c2 + -1 + 2 - 1) / 2)))) && c1 >= c2 + 1 && c2 <= (((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8))) {
for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < 16 * c2 + n + 12?16 * c1 + 15 : 16 * c2 + n + 12)); c4++) {
B[-16 * c2 + c4 + -14][1] = 0.2 * (A[-16 * c2 + c4 + -14][1] + A[-16 * c2 + c4 + -14][1 - 1] + A[-16 * c2 + c4 + -14][1 + 1] + A[1 + (-16 * c2 + c4 + -14)][1] + A[-16 * c2 + c4 + -14 - 1][1]);
}
}
if (c0 >= (((3 * c1 + -1) * 2 < 0?-(-(3 * c1 + -1) / 2) : ((2 < 0?(-(3 * c1 + -1) + - 2 - 1) / - 2 : (3 * c1 + -1 + 2 - 1) / 2)))) && c1 <= (((((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) < c2?(((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) : c2))) {
for (c5 = (16 * c2 > 16 * c1 + 15?16 * c2 : 16 * c1 + 15); c5 <= ((16 * c2 + 15 < 16 * c1 + n + 12?16 * c2 + 15 : 16 * c1 + n + 12)); c5++) {
B[1][-16 * c1 + c5 + -14] = 0.2 * (A[1][-16 * c1 + c5 + -14] + A[1][-16 * c1 + c5 + -14 - 1] + A[1][1 + (-16 * c1 + c5 + -14)] + A[1 + 1][-16 * c1 + c5 + -14] + A[1 - 1][-16 * c1 + c5 + -14]);
}
}
}
}
}
}
}
//#pragma endscop
}
int main(int argc,char **argv)
{
/* Retrieve problem size. */
int n = 500;
int tsteps = 10;
/* Variable declaration/allocation. */
double (*A)[500 + 0][500 + 0];
A = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
double (*B)[500 + 0][500 + 0];
B = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
/* Initialize array(s). */
init_array(n, *A, *B);
/* Start timer. */
polybench_timer_start();
;
/* Run kernel. */
kernel_jacobi_2d_imper(tsteps,n, *A, *B);
/* Stop and print timer. */
polybench_timer_stop();
;
polybench_timer_print();
;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
if (argc > 42 && !strcmp(argv[0],""))
print_array(n, *A);
/* Be clean. */
free(((void *)A));
;
free(((void *)B));
;
return 0;
}
|
efp.c | /*-
* Copyright (c) 2012-2017 Ilya Kaliman
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "balance.h"
#include "clapack.h"
#include "elec.h"
#include "private.h"
#include "stream.h"
#include "util.h"
static void
add_screen2_params(struct frag *frag) {
double *scr;
scr = (double *)malloc(frag->n_multipole_pts * sizeof(double));
if (scr == NULL)
return; /* out of memory; keep the existing screening parameters */
for (size_t i = 0; i < frag->n_multipole_pts; i++) {
scr[i] = 10.0; /* large value - no effective screening */
}
if (frag->screen_params)
free(frag->screen_params);
frag->screen_params = scr;
}
static void
update_fragment(struct frag *frag)
{
/* update atoms */
for (size_t i = 0; i < frag->n_atoms; i++)
efp_move_pt(CVEC(frag->x), &frag->rotmat,
CVEC(frag->lib->atoms[i].x), VEC(frag->atoms[i].x));
efp_update_elec(frag);
efp_update_pol(frag);
efp_update_disp(frag);
efp_update_xr(frag);
}
static enum efp_result
set_coord_xyzabc(struct frag *frag, const double *coord)
{
frag->x = coord[0];
frag->y = coord[1];
frag->z = coord[2];
euler_to_matrix(coord[3], coord[4], coord[5], &frag->rotmat);
update_fragment(frag);
return EFP_RESULT_SUCCESS;
}
static enum efp_result
set_coord_points(struct frag *frag, const double *coord)
{
/* allow fragments with fewer than three atoms by using multipole points
 * of ghost atoms; multipole points have the same coordinates as atoms */
if (frag->n_multipole_pts < 3) {
efp_log("fragment must contain at least three atoms");
return EFP_RESULT_FATAL;
}
double ref[9] = {
frag->lib->multipole_pts[0].x,
frag->lib->multipole_pts[0].y,
frag->lib->multipole_pts[0].z,
frag->lib->multipole_pts[1].x,
frag->lib->multipole_pts[1].y,
frag->lib->multipole_pts[1].z,
frag->lib->multipole_pts[2].x,
frag->lib->multipole_pts[2].y,
frag->lib->multipole_pts[2].z
};
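/* efp_points_to_matrix builds an orthonormal frame from each point
   triple: rot1 for the target points, rot2 for the library reference;
   rot1 * rot2^T then rotates the library frame onto the target frame. */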
vec_t p1;
mat_t rot1, rot2;
efp_points_to_matrix(coord, &rot1);
efp_points_to_matrix(ref, &rot2);
rot2 = mat_transpose(&rot2);
frag->rotmat = mat_mat(&rot1, &rot2);
p1 = mat_vec(&frag->rotmat, VEC(frag->lib->multipole_pts[0].x));
/* center of mass */
frag->x = coord[0] - p1.x;
frag->y = coord[1] - p1.y;
frag->z = coord[2] - p1.z;
update_fragment(frag);
return EFP_RESULT_SUCCESS;
}
static enum efp_result
set_coord_rotmat(struct frag *frag, const double *coord)
{
if (!efp_check_rotation_matrix((const mat_t *)(coord + 3))) {
efp_log("invalid rotation matrix specified");
return EFP_RESULT_FATAL;
}
frag->x = coord[0];
frag->y = coord[1];
frag->z = coord[2];
memcpy(&frag->rotmat, coord + 3, sizeof(frag->rotmat));
update_fragment(frag);
return EFP_RESULT_SUCCESS;
}
static void
free_frag(struct frag *frag)
{
if (!frag)
return;
free(frag->atoms);
free(frag->multipole_pts);
free(frag->polarizable_pts);
free(frag->dynamic_polarizable_pts);
free(frag->dipquad_polarizable_pts);
free(frag->lmo_centroids);
free(frag->xr_fock_mat);
free(frag->xr_wf);
free(frag->xrfit);
free(frag->screen_params);
free(frag->ai_screen_params);
for (size_t i = 0; i < 3; i++)
free(frag->xr_wf_deriv[i]);
for (size_t i = 0; i < frag->n_xr_atoms; i++) {
for (size_t j = 0; j < frag->xr_atoms[i].n_shells; j++)
free(frag->xr_atoms[i].shells[j].coef);
free(frag->xr_atoms[i].shells);
}
free(frag->xr_atoms);
/* don't do free(frag) here */
}
static enum efp_result
copy_frag(struct frag *dest, const struct frag *src)
{
size_t size;
memcpy(dest, src, sizeof(*dest));
if (src->atoms) {
size = src->n_atoms * sizeof(struct efp_atom);
dest->atoms = (struct efp_atom *)malloc(size);
if (!dest->atoms)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->atoms, src->atoms, size);
}
if (src->multipole_pts) {
size = src->n_multipole_pts * sizeof(struct multipole_pt);
dest->multipole_pts = (struct multipole_pt *)malloc(size);
if (!dest->multipole_pts)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->multipole_pts, src->multipole_pts, size);
}
if (src->screen_params) {
size = src->n_multipole_pts * sizeof(double);
dest->screen_params = (double *)malloc(size);
if (!dest->screen_params)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->screen_params, src->screen_params, size);
}
if (src->ai_screen_params) {
size = src->n_multipole_pts * sizeof(double);
dest->ai_screen_params = (double *)malloc(size);
if (!dest->ai_screen_params)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->ai_screen_params, src->ai_screen_params, size);
}
if (src->polarizable_pts) {
size = src->n_polarizable_pts * sizeof(struct polarizable_pt);
dest->polarizable_pts = (struct polarizable_pt *)malloc(size);
if (!dest->polarizable_pts)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->polarizable_pts, src->polarizable_pts, size);
}
if (src->dynamic_polarizable_pts) {
size = src->n_dynamic_polarizable_pts *
sizeof(struct dynamic_polarizable_pt);
dest->dynamic_polarizable_pts =
(struct dynamic_polarizable_pt *)malloc(size);
if (!dest->dynamic_polarizable_pts)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->dynamic_polarizable_pts,
src->dynamic_polarizable_pts, size);
}
if (src->dipquad_polarizable_pts) {
size = src->n_dipquad_polarizable_pts *
sizeof(struct dipquad_polarizable_pt);
dest->dipquad_polarizable_pts =
(struct dipquad_polarizable_pt *)malloc(size);
if (!dest->dipquad_polarizable_pts)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->dipquad_polarizable_pts,
src->dipquad_polarizable_pts, size);
}
if (src->lmo_centroids) {
size = src->n_lmo * sizeof(vec_t);
dest->lmo_centroids = (vec_t *)malloc(size);
if (!dest->lmo_centroids)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->lmo_centroids, src->lmo_centroids, size);
}
if (src->xr_atoms) {
size = src->n_xr_atoms * sizeof(struct xr_atom);
dest->xr_atoms = (struct xr_atom *)malloc(size);
if (!dest->xr_atoms)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->xr_atoms, src->xr_atoms, size);
for (size_t j = 0; j < src->n_xr_atoms; j++) {
const struct xr_atom *at_src = src->xr_atoms + j;
struct xr_atom *at_dest = dest->xr_atoms + j;
size = at_src->n_shells * sizeof(struct shell);
at_dest->shells = (struct shell *)malloc(size);
if (!at_dest->shells)
return EFP_RESULT_NO_MEMORY;
memcpy(at_dest->shells, at_src->shells, size);
for (size_t i = 0; i < at_src->n_shells; i++) {
size = (at_src->shells[i].type == 'L' ? 3 : 2) *
at_src->shells[i].n_funcs * sizeof(double);
at_dest->shells[i].coef =
(double *)malloc(size);
if (!at_dest->shells[i].coef)
return EFP_RESULT_NO_MEMORY;
memcpy(at_dest->shells[i].coef,
at_src->shells[i].coef, size);
}
}
}
if (src->xr_fock_mat) {
size = src->n_lmo * (src->n_lmo + 1) / 2 * sizeof(double);
dest->xr_fock_mat = (double *)malloc(size);
if (!dest->xr_fock_mat)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->xr_fock_mat, src->xr_fock_mat, size);
}
if (src->xr_wf) {
size = src->n_lmo * src->xr_wf_size * sizeof(double);
dest->xr_wf = (double *)malloc(size);
if (!dest->xr_wf)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->xr_wf, src->xr_wf, size);
}
if (src->xrfit) {
size = src->n_lmo * 4 * sizeof(double);
dest->xrfit = (double *)malloc(size);
if (!dest->xrfit)
return EFP_RESULT_NO_MEMORY;
memcpy(dest->xrfit, src->xrfit, size);
}
return EFP_RESULT_SUCCESS;
}
static enum efp_result
check_opts(const struct efp_opts *opts)
{
if (opts->enable_pbc) {
if ((opts->terms & EFP_TERM_AI_ELEC) ||
(opts->terms & EFP_TERM_AI_POL) ||
(opts->terms & EFP_TERM_AI_DISP) ||
(opts->terms & EFP_TERM_AI_XR) ||
(opts->terms & EFP_TERM_AI_CHTR)) {
efp_log("periodic calculations are not supported "
"for QM/EFP");
return EFP_RESULT_FATAL;
}
if (!opts->enable_cutoff) {
efp_log("periodic calculations require interaction "
"cutoff to be enabled");
return EFP_RESULT_FATAL;
}
}
if (opts->enable_cutoff) {
if (opts->swf_cutoff < 1.0) {
efp_log("interaction cutoff is too small");
return EFP_RESULT_FATAL;
}
}
if (opts->enable_cutoff) {
if (opts->swf_cutoff < opts->xr_cutoff) {
efp_log("exchange-repulsion cutoff is smaller than interaction cutoff");
return EFP_RESULT_FATAL;
}
}
return EFP_RESULT_SUCCESS;
}
static enum efp_result
check_frag_params(const struct efp_opts *opts, struct frag *frag)
{
if ((opts->terms & EFP_TERM_ELEC) || (opts->terms & EFP_TERM_AI_ELEC)) {
if (!frag->multipole_pts) {
efp_log("electrostatic parameters are missing");
return EFP_RESULT_FATAL;
}
if (opts->elec_damp == EFP_ELEC_DAMP_SCREEN &&
frag->screen_params == NULL) {
efp_log("electrostatic screening parameters are missing; continue");
add_screen2_params(frag);
//return EFP_RESULT_FATAL;
}
}
if ((opts->terms & EFP_TERM_POL) || (opts->terms & EFP_TERM_AI_POL)) {
if (!frag->polarizable_pts || !frag->multipole_pts) {
efp_log("polarization parameters are missing");
return EFP_RESULT_FATAL;
}
}
if ((opts->terms & EFP_TERM_DISP) || (opts->terms & EFP_TERM_AI_DISP)) {
if (frag->dynamic_polarizable_pts == NULL) {
efp_log("dispersion parameters are missing");
return EFP_RESULT_FATAL;
}
if (opts->disp_damp == EFP_DISP_DAMP_OVERLAP &&
frag->n_lmo != frag->n_dynamic_polarizable_pts) {
efp_log("number of polarization points does not "
"match number of LMOs");
return EFP_RESULT_FATAL;
}
}
if ((opts->terms & EFP_TERM_XR) || (opts->terms & EFP_TERM_AI_XR)) {
if (!frag->xr_atoms ||
!frag->xr_fock_mat ||
!frag->xr_wf ||
!frag->lmo_centroids) {
efp_log("exchange repulsion parameters are missing");
return EFP_RESULT_FATAL;
}
}
return EFP_RESULT_SUCCESS;
}
static enum efp_result
check_params(struct efp *efp)
{
enum efp_result res;
for (size_t i = 0; i < efp->n_frag; i++)
if ((res = check_frag_params(&efp->opts, efp->frags + i))) {
efp_log("check_params() failure");
return res;
}
return EFP_RESULT_SUCCESS;
}
static int
do_elec(const struct efp_opts *opts)
{
return opts->terms & EFP_TERM_ELEC;
}
static int
do_disp(const struct efp_opts *opts)
{
return opts->terms & EFP_TERM_DISP;
}
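/* Overlap integrals computed by the exchange-repulsion code are also
   needed when overlap-based damping is enabled for electrostatics
   (charge penetration) or dispersion, even if the XR term is off. */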
static int
do_xr(const struct efp_opts *opts)
{
int xr = (opts->terms & EFP_TERM_XR);
int cp = (opts->terms & EFP_TERM_ELEC) &&
(opts->elec_damp == EFP_ELEC_DAMP_OVERLAP);
int dd = (opts->terms & EFP_TERM_DISP) &&
(opts->disp_damp == EFP_DISP_DAMP_OVERLAP);
return xr || cp || dd;
}
static void
compute_two_body_range(struct efp *efp, size_t frag_from, size_t frag_to,
void *data)
{
double e_elec = 0.0, e_disp = 0.0, e_xr = 0.0, e_cp = 0.0, e_elec_tmp = 0.0, e_disp_tmp = 0.0;
(void)data;
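/* Round-robin pairing: each fragment i is paired with the next cnt
   fragments cyclically (indices taken mod n_frag), so every pair is
   visited exactly once across the parallel loop and the work per
   iteration stays balanced. */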
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) reduction(+:e_elec,e_disp,e_xr,e_cp)
#endif
for (size_t i = frag_from; i < frag_to; i++) {
size_t cnt = efp->n_frag % 2 ? (efp->n_frag - 1) / 2 :
i < efp->n_frag / 2 ? efp->n_frag / 2 :
efp->n_frag / 2 - 1;
for (size_t j = i + 1; j < i + 1 + cnt; j++) {
size_t fr_j = j % efp->n_frag;
if (!efp_skip_frag_pair(efp, i, fr_j)) {
double *s;
six_t *ds;
size_t n_lmo_ij = efp->frags[i].n_lmo *
efp->frags[fr_j].n_lmo;
s = (double *)calloc(n_lmo_ij, sizeof(double));
ds = (six_t *)calloc(n_lmo_ij, sizeof(six_t));
if (do_xr(&efp->opts)) {
double exr, ecp;
efp_frag_frag_xr(efp, i, fr_j,
s, ds, &exr, &ecp);
e_xr += exr;
e_cp += ecp;
/* */
if (efp->opts.enable_pairwise) {
if (i == efp->opts.ligand) {
efp->pair_energies[fr_j].exchange_repulsion = exr;
efp->pair_energies[fr_j].charge_penetration = ecp;
}
if (fr_j == efp->opts.ligand) {
efp->pair_energies[i].exchange_repulsion = exr;
efp->pair_energies[i].charge_penetration = ecp;
}
}
}
if (do_elec(&efp->opts)) {
e_elec_tmp = efp_frag_frag_elec(efp,
i, fr_j);
e_elec += e_elec_tmp;
/* */
if (efp->opts.enable_pairwise) {
if (i == efp->opts.ligand)
efp->pair_energies[fr_j].electrostatic = e_elec_tmp;
if (fr_j == efp->opts.ligand)
efp->pair_energies[i].electrostatic = e_elec_tmp;
}
}
if (do_disp(&efp->opts)) {
e_disp_tmp = efp_frag_frag_disp(efp,
i, fr_j, s, ds);
e_disp += e_disp_tmp;
/* */
if (efp->opts.enable_pairwise) {
if (i == efp->opts.ligand)
efp->pair_energies[fr_j].dispersion = e_disp_tmp;
if (fr_j == efp->opts.ligand)
efp->pair_energies[i].dispersion = e_disp_tmp;
}
}
free(s);
free(ds);
}
}
}
efp->energy.electrostatic += e_elec;
efp->energy.dispersion += e_disp;
efp->energy.exchange_repulsion += e_xr;
efp->energy.charge_penetration += e_cp;
}
EFP_EXPORT enum efp_result
compute_two_body_crystal(struct efp *efp)
{
double e_elec = 0.0, e_disp = 0.0, e_xr = 0.0, e_cp = 0.0, e_elec_tmp = 0.0, e_disp_tmp = 0.0;
// no parallelization
size_t nsymm = efp->nsymm_frag;
size_t *unique_frag = (size_t *)calloc(nsymm, sizeof(size_t));
unique_symm_frag(efp, unique_frag);
size_t *nsymm_frag = (size_t *)calloc(nsymm, sizeof(size_t));
n_symm_frag(efp, nsymm_frag);
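/* unique_frag[k] is a representative fragment index for symmetry class
   k; nsymm_frag[k] is how many fragments belong to that class. */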
for (size_t k = 0; k < nsymm; k++) {
size_t i = unique_frag[k];
struct frag *frag = efp->frags + i;
// scaling factor that tells how many fragments like this are in the system
size_t factor = nsymm_frag[k];
for (size_t fr_j=0; fr_j<efp->n_frag; fr_j++){
if ( fr_j != i && !efp_skip_frag_pair(efp, i, fr_j)) {
struct frag *fragj = efp->frags + fr_j;
double *s;
six_t *ds;
size_t n_lmo_ij = efp->frags[i].n_lmo *
efp->frags[fr_j].n_lmo;
s = (double *)calloc(n_lmo_ij, sizeof(double));
ds = (six_t *)calloc(n_lmo_ij, sizeof(six_t));
if (do_xr(&efp->opts)) {
double exr, ecp;
efp_frag_frag_xr(efp, i, fr_j,
s, ds, &exr, &ecp);
e_xr += exr * factor;
e_cp += ecp * factor;
/* */
if (efp->opts.enable_pairwise) {
if (i == efp->opts.ligand) {
efp->pair_energies[fr_j].exchange_repulsion = exr;
efp->pair_energies[fr_j].charge_penetration = ecp;
}
if (fr_j == efp->opts.ligand) {
efp->pair_energies[i].exchange_repulsion = exr;
efp->pair_energies[i].charge_penetration = ecp;
}
}
}
if (do_elec(&efp->opts)) {
e_elec_tmp = efp_frag_frag_elec(efp,
i, fr_j);
e_elec += e_elec_tmp * factor;
/* */
if (efp->opts.enable_pairwise) {
if (i == efp->opts.ligand)
efp->pair_energies[fr_j].electrostatic = e_elec_tmp;
if (fr_j == efp->opts.ligand)
efp->pair_energies[i].electrostatic = e_elec_tmp;
}
}
if (do_disp(&efp->opts)) {
e_disp_tmp = efp_frag_frag_disp(efp,
i, fr_j, s, ds);
e_disp += e_disp_tmp * factor;
/* */
if (efp->opts.enable_pairwise) {
if (i == efp->opts.ligand)
efp->pair_energies[fr_j].dispersion = e_disp_tmp;
if (fr_j == efp->opts.ligand)
efp->pair_energies[i].dispersion = e_disp_tmp;
}
}
free(s);
free(ds);
}
}
}
// really, we counted all pairwise interactions twice. Scaling back
efp->energy.electrostatic += e_elec/2;
efp->energy.dispersion += e_disp/2;
efp->energy.exchange_repulsion += e_xr/2;
efp->energy.charge_penetration += e_cp/2;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_energy(struct efp *efp, struct efp_energy *energy)
{
assert(efp);
assert(energy);
*energy = efp->energy;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_gradient(struct efp *efp, double *grad)
{
assert(efp);
assert(grad);
if (!efp->do_gradient) {
efp_log("gradient calculation was not requested");
return EFP_RESULT_FATAL;
}
memcpy(grad, efp->grad, efp->n_frag * sizeof(six_t));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_atomic_gradient(struct efp *efp, double *grad)
{
six_t *efpgrad = NULL; /* Calculated EFP gradient */
vec_t *pgrad; /* Conversion of grad to vec_t type */
size_t i, j, k, l;
size_t nr; /* Number of atoms in the current fragment */
size_t maxa; /* Maximum size of the m, Ia, r arrays */
vec_t *r = NULL; /* Radius-vector of each atom inside the current fragment
with respect to the CoM of that fragment */
double mm, *m = NULL; /* Total fragment mass and individual atom masses */
double I, *Ia = NULL; /* Inertia along an axis and contribution of each
individual atom */
mat_t Id; /* Total inertia tensor of a fragment */
vec_t v, g; /* Principal axis and inertia along that axis */
vec_t rbuf, rbuf2, tq, ri, rt;
double dist, sina, ft, norm;
enum efp_result res;
assert(efp);
assert(grad);
if (!efp->do_gradient) {
efp_log("gradient calculation was not requested");
return EFP_RESULT_FATAL;
}
pgrad = (vec_t *)grad;
/* Calculate maximum size of a fragment */
maxa = 0;
for (j = 0; j < efp->n_frag; j++) {
if (efp->frags[j].n_atoms > maxa)
maxa = efp->frags[j].n_atoms;
}
res = EFP_RESULT_NO_MEMORY;
/* Create and initialize some arrays for work */
if ((r = (vec_t *)malloc(maxa * sizeof(*r))) == NULL)
goto error;
if ((m = (double *)malloc(maxa * sizeof(*m))) == NULL)
goto error;
if ((Ia = (double *)malloc(maxa * sizeof(*Ia))) == NULL)
goto error;
/* Copy computed efp->grad */
if ((efpgrad = (six_t *)malloc(efp->n_frag * sizeof(*efpgrad))) == NULL)
goto error;
memcpy(efpgrad, efp->grad, efp->n_frag * sizeof(*efpgrad));
/* Main cycle (iterate fragments, distribute forces and torques) */
for (k = 0, j = 0; j < efp->n_frag; j++) {
nr = efp->frags[j].n_atoms;
memset(r, 0, maxa * sizeof(*r));
memset(m, 0, maxa * sizeof(*m));
memset(Ia, 0, maxa * sizeof(*Ia));
mm = 0.0;
Id = mat_zero;
v = vec_zero;
g = vec_zero;
for (i = 0; i < nr ; i++) {
r[i].x = efp->frags[j].atoms[i].x - efp->frags[j].x;
r[i].y = efp->frags[j].atoms[i].y - efp->frags[j].y;
r[i].z = efp->frags[j].atoms[i].z - efp->frags[j].z;
m[i] = efp->frags[j].atoms[i].mass;
mm += m[i];
/* Inertia tensor contribution calculations */
Id.xx += m[i] * (r[i].y*r[i].y + r[i].z*r[i].z);
Id.yy += m[i] * (r[i].x*r[i].x + r[i].z*r[i].z);
Id.zz += m[i] * (r[i].x*r[i].x + r[i].y*r[i].y);
Id.xy -= m[i] * r[i].x * r[i].y;
Id.yx -= m[i] * r[i].x * r[i].y;
Id.xz -= m[i] * r[i].x * r[i].z;
Id.zx -= m[i] * r[i].x * r[i].z;
Id.yz -= m[i] * r[i].y * r[i].z;
Id.zy -= m[i] * r[i].y * r[i].z;
}
/* Try to diagonalize Id and get the principal axes */
if (efp_dsyev('V', 'U', 3, (double *)&Id, 3, (double *)&g)) {
efp_log("inertia tensor diagonalization failed");
res = EFP_RESULT_FATAL;
goto error;
}
/* Add any additional forces from grad array to efpgrad array */
for (i = 0; i < nr; i++) {
efpgrad[j].x += pgrad[k+i].x;
efpgrad[j].y += pgrad[k+i].y;
efpgrad[j].z += pgrad[k+i].z;
rbuf = vec_cross(&r[i], &pgrad[k+i]);
efpgrad[j].a += rbuf.x;
efpgrad[j].b += rbuf.y;
efpgrad[j].c += rbuf.z;
pgrad[k+i] = vec_zero;
}
/* Now we are ready to redistribute efpgrad over the atoms */
/* Redistribute total translation grad[i]=m[i]/mm*efpgrad[j] */
for (i = 0; i < nr; i++) {
pgrad[k+i].x = efpgrad[j].x;
pgrad[k+i].y = efpgrad[j].y;
pgrad[k+i].z = efpgrad[j].z;
vec_scale(&pgrad[k+i], m[i] / mm);
}
/* Redistribution of torque should be done over 3 principal
axes computed previously */
for (l = 0; l < 3; l++) {
v = ((vec_t *)&Id)[l];
tq.x = efpgrad[j].a;
tq.y = efpgrad[j].b;
tq.z = efpgrad[j].c;
/* Calculate contribution of each atom to moment of
inertia with respect to current axis */
I = 0.0;
for (i = 0; i < nr; i++) {
rbuf = vec_cross(&v, &r[i]);
dist = vec_len(&rbuf);
Ia[i] = m[i] * dist * dist;
I += Ia[i];
}
/* Project torque onto v axis */
norm = vec_dot(&tq, &v);
tq = v;
vec_scale(&tq, norm);
/* Now distribute torque using Ia[i]/I as a scale */
for (i = 0; i < nr; i++) {
/* Skip atoms lying on the current axis */
if (eq(Ia[i], 0.0))
continue;
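/* Atom i receives a force perpendicular to both the torque axis and
   r[i], with magnitude |share| / (|r_i| sin(angle)), where the share
   is the fraction Ia[i]/I of the projected torque; its moment about
   v then reproduces that share. */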
rbuf = tq;
vec_scale(&rbuf, Ia[i]/I);
ft = vec_len(&rbuf);
ri = r[i];
vec_normalize(&ri);
rt = tq;
vec_normalize(&rt);
rbuf2 = vec_cross(&rt, &ri);
sina = vec_len(&rbuf2);
vec_normalize(&rbuf2);
vec_scale(&rbuf2, ft/sina/vec_len(&r[i]));
/* Update grad with torque contribution of
atom i over axis v */
pgrad[k+i] = vec_add(&pgrad[k+i], &rbuf2);
}
}
k += nr;
}
res = EFP_RESULT_SUCCESS;
error:
free(r);
free(m);
free(Ia);
free(efpgrad);
return res;
}
EFP_EXPORT enum efp_result
efp_set_point_charges(struct efp *efp, size_t n_ptc, const double *ptc,
const double *xyz)
{
assert(efp);
efp->n_ptc = n_ptc;
if (n_ptc == 0) {
free(efp->ptc);
free(efp->ptc_xyz);
free(efp->ptc_grad);
efp->ptc = NULL;
efp->ptc_xyz = NULL;
efp->ptc_grad = NULL;
return EFP_RESULT_SUCCESS;
}
assert(ptc);
assert(xyz);
efp->ptc = (double *)realloc(efp->ptc, n_ptc * sizeof(double));
efp->ptc_xyz = (vec_t *)realloc(efp->ptc_xyz, n_ptc * sizeof(vec_t));
efp->ptc_grad = (vec_t *)realloc(efp->ptc_grad, n_ptc * sizeof(vec_t));
memcpy(efp->ptc, ptc, n_ptc * sizeof(double));
memcpy(efp->ptc_xyz, xyz, n_ptc * sizeof(vec_t));
memset(efp->ptc_grad, 0, n_ptc * sizeof(vec_t));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_point_charge_count(struct efp *efp, size_t *n_ptc)
{
assert(efp);
assert(n_ptc);
*n_ptc = efp->n_ptc;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_point_charge_gradient(struct efp *efp, double *grad)
{
assert(efp);
assert(grad);
if (!efp->do_gradient) {
efp_log("gradient calculation was not requested");
return EFP_RESULT_FATAL;
}
memcpy(grad, efp->ptc_grad, efp->n_ptc * sizeof(vec_t));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_point_charge_coordinates(struct efp *efp, double *xyz)
{
assert(efp);
assert(xyz);
memcpy(xyz, efp->ptc_xyz, efp->n_ptc * sizeof(vec_t));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_point_charge_coordinates(struct efp *efp, const double *xyz)
{
assert(efp);
assert(xyz);
memcpy(efp->ptc_xyz, xyz, efp->n_ptc * sizeof(vec_t));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_point_charge_values(struct efp *efp, double *ptc)
{
assert(efp);
assert(ptc);
memcpy(ptc, efp->ptc, efp->n_ptc * sizeof(double));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_point_charge_values(struct efp *efp, const double *ptc)
{
assert(efp);
assert(ptc);
memcpy(efp->ptc, ptc, efp->n_ptc * sizeof(double));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_coordinates(struct efp *efp, enum efp_coord_type coord_type,
const double *coord)
{
assert(efp);
assert(coord);
size_t stride;
enum efp_result res;
switch (coord_type) {
case EFP_COORD_TYPE_XYZABC:
stride = 6;
break;
case EFP_COORD_TYPE_POINTS:
stride = 9;
break;
case EFP_COORD_TYPE_ROTMAT:
stride = 12;
break;
default:
efp_log("unknown coordinate type");
return EFP_RESULT_FATAL;
}
for (size_t i = 0; i < efp->n_frag; i++, coord += stride)
if ((res = efp_set_frag_coordinates(efp, i, coord_type, coord))) {
efp_log("efp_set_coordinates() failure");
return res;
}
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_frag_coordinates(struct efp *efp, size_t frag_idx,
enum efp_coord_type coord_type, const double *coord)
{
struct frag *frag;
assert(efp);
assert(coord);
assert(frag_idx < efp->n_frag);
frag = efp->frags + frag_idx;
switch (coord_type) {
case EFP_COORD_TYPE_XYZABC:
return set_coord_xyzabc(frag, coord);
case EFP_COORD_TYPE_POINTS:
return set_coord_points(frag, coord);
case EFP_COORD_TYPE_ROTMAT:
return set_coord_rotmat(frag, coord);
}
efp_log("efp_set_frag_coordinates() failure");
assert(0);
}
EFP_EXPORT enum efp_result
efp_get_coordinates(struct efp *efp, double *xyzabc)
{
assert(efp);
assert(xyzabc);
for (size_t i = 0; i < efp->n_frag; i++) {
struct frag *frag = efp->frags + i;
double a, b, c;
matrix_to_euler(&frag->rotmat, &a, &b, &c);
*xyzabc++ = frag->x;
*xyzabc++ = frag->y;
*xyzabc++ = frag->z;
*xyzabc++ = a;
*xyzabc++ = b;
*xyzabc++ = c;
}
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_xyzabc(struct efp *efp, size_t frag_idx, double *xyzabc)
{
struct frag *frag;
double a, b, c;
assert(efp);
assert(frag_idx < efp->n_frag);
assert(xyzabc);
frag = efp->frags + frag_idx;
matrix_to_euler(&frag->rotmat, &a, &b, &c);
xyzabc[0] = frag->x;
xyzabc[1] = frag->y;
xyzabc[2] = frag->z;
xyzabc[3] = a;
xyzabc[4] = b;
xyzabc[5] = c;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_periodic_box(struct efp *efp, double x, double y, double z, double alpha, double beta, double gamma)
{
assert(efp);
if (alpha < 0.01) {
// assigning default angles of 90.0 degrees
//printf("\n assigning angles to 90.0 \n");
alpha = 90.0;
beta = 90.0;
gamma = 90.0;
}
efp->box.x = x;
efp->box.y = y;
efp->box.z = z;
efp->box.a = alpha;
efp->box.b = beta;
efp->box.c = gamma;
double max_cut = max_cutoff(efp->box);
double cut = efp->opts.swf_cutoff;
if (cut > max_cut) {
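/* 0.52917721092 converts bohr to angstrom for the printed values */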
printf("\n Maximum allowed cutoff is %lf ", max_cut*0.52917721092);
printf("\n Given cutoff is %lf \n", cut*0.52917721092);
efp_log("periodic box dimensions must be at least twice "
"the switching function cutoff");
return EFP_RESULT_FATAL;
}
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_periodic_box(struct efp *efp, double *xyzabc)
{
assert(efp);
assert(xyzabc);
xyzabc[0] = efp->box.x;
xyzabc[1] = efp->box.y;
xyzabc[2] = efp->box.z;
xyzabc[3] = efp->box.a;
xyzabc[4] = efp->box.b;
xyzabc[5] = efp->box.c;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_stress_tensor(struct efp *efp, double *stress)
{
assert(efp);
assert(stress);
if (!efp->do_gradient) {
efp_log("gradient calculation was not requested");
return EFP_RESULT_FATAL;
}
*(mat_t *)stress = efp->stress;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_ai_screen(struct efp *efp, size_t frag_idx, double *screen)
{
const struct frag *frag;
size_t size;
assert(efp);
assert(screen);
assert(frag_idx < efp->n_frag);
frag = &efp->frags[frag_idx];
if (frag->ai_screen_params == NULL) {
efp_log("no screening parameters found for %s", frag->name);
return EFP_RESULT_FATAL;
}
size = frag->n_multipole_pts * sizeof(double);
memcpy(screen, frag->ai_screen_params, size);
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_prepare(struct efp *efp)
{
assert(efp);
efp->n_polarizable_pts = 0;
for (size_t i = 0; i < efp->n_frag; i++) {
efp->frags[i].polarizable_offset = efp->n_polarizable_pts;
efp->n_polarizable_pts += efp->frags[i].n_polarizable_pts;
}
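/* polarizable_offset is a running prefix sum marking each fragment's
   slice in the global induced-dipole arrays allocated below. */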
efp->n_fragment_field_pts = 0;
if (efp->opts.enable_pairwise && efp->opts.ligand != -1) {
size_t ligand_idx = efp->opts.ligand;
for (size_t i = 0; i < efp->n_frag; i++) {
efp->frags[i].fragment_field_offset = efp->n_fragment_field_pts;
efp->n_fragment_field_pts += efp->frags[ligand_idx].n_polarizable_pts;
}
}
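/* Each fragment's block in fragment_field is sized by the ligand's
   polarizable point count: it stores fields at the ligand's points. */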
efp->fragment_field = (vec_t *)calloc(efp->n_fragment_field_pts, sizeof(vec_t));
efp->indip = (vec_t *)calloc(efp->n_polarizable_pts, sizeof(vec_t));
efp->indipconj = (vec_t *)calloc(efp->n_polarizable_pts, sizeof(vec_t));
efp->grad = (six_t *)calloc(efp->n_frag, sizeof(six_t));
efp->skiplist = (char *)calloc(efp->n_frag * efp->n_frag, 1);
efp->pair_energies = (struct efp_energy *)calloc(efp->n_frag, sizeof(struct efp_energy));
efp->symmlist = (size_t *)calloc(efp->n_frag, sizeof(size_t));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_orbital_energies(struct efp *efp, size_t n_core, size_t n_act,
size_t n_vir, const double *oe)
{
size_t size;
assert(efp);
assert(oe);
efp->n_ai_core = n_core;
efp->n_ai_act = n_act;
efp->n_ai_vir = n_vir;
size = (n_core + n_act + n_vir) * sizeof(double);
efp->ai_orbital_energies = (double *)realloc(efp->ai_orbital_energies,
size);
memcpy(efp->ai_orbital_energies, oe, size);
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_dipole_integrals(struct efp *efp, size_t n_core, size_t n_act,
size_t n_vir, const double *dipint)
{
size_t size;
assert(efp);
assert(dipint);
efp->n_ai_core = n_core;
efp->n_ai_act = n_act;
efp->n_ai_vir = n_vir;
size = 3 * (n_core + n_act + n_vir) * (n_core + n_act + n_vir);
efp->ai_dipole_integrals = (double *)realloc(efp->ai_dipole_integrals,
size * sizeof(double));
memcpy(efp->ai_dipole_integrals, dipint, size * sizeof(double));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_wavefunction_dependent_energy(struct efp *efp, double *energy)
{
assert(efp);
assert(energy);
if (!(efp->opts.terms & EFP_TERM_POL) &&
!(efp->opts.terms & EFP_TERM_AI_POL)) {
*energy = 0.0;
return EFP_RESULT_SUCCESS;
}
return efp_compute_pol_energy(efp, energy);
}
EFP_EXPORT enum efp_result
efp_compute(struct efp *efp, int do_gradient)
{
enum efp_result res;
assert(efp);
if (efp->grad == NULL) {
efp_log("call efp_prepare after all fragments are added");
return EFP_RESULT_FATAL;
}
efp->do_gradient = do_gradient;
if ((res = check_params(efp))) {
efp_log("check_params() failure");
return res;
}
memset(&efp->energy, 0, sizeof(efp->energy));
memset(&efp->stress, 0, sizeof(efp->stress));
memset(efp->grad, 0, efp->n_frag * sizeof(six_t));
memset(efp->ptc_grad, 0, efp->n_ptc * sizeof(vec_t));
memset(efp->pair_energies, 0, efp->n_frag * sizeof(efp->energy));
if (efp->opts.symmetry == 0) { // standard case
efp_balance_work(efp, compute_two_body_range, NULL);
}
else { // high-symmetry crystals
if ((res = compute_two_body_crystal(efp))) {
efp_log("compute_two_body_crystal() failure");
return res;
}
}
if ((res = efp_compute_pol(efp))) {
efp_log("efp_compute_pol() failure");
return res;
}
if ((res = efp_compute_ai_elec(efp))) {
efp_log("efp_compute_ai_elec() failure");
return res;
}
if ((res = efp_compute_ai_disp(efp))) {
efp_log("efp_compute_ai_disp() failure");
return res;
}
#ifdef EFP_USE_MPI
efp_allreduce(&efp->energy.electrostatic, 1);
efp_allreduce(&efp->energy.dispersion, 1);
efp_allreduce(&efp->energy.exchange_repulsion, 1);
efp_allreduce(&efp->energy.charge_penetration, 1);
if (efp->do_gradient) {
efp_allreduce((double *)efp->grad, 6 * efp->n_frag);
efp_allreduce((double *)efp->ptc_grad, 3 * efp->n_ptc);
efp_allreduce((double *)&efp->stress, 9);
}
#endif
efp->energy.total = efp->energy.electrostatic +
efp->energy.charge_penetration +
efp->energy.electrostatic_point_charges +
efp->energy.polarization +
efp->energy.dispersion +
efp->energy.ai_dispersion +
efp->energy.exchange_repulsion;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_charge(struct efp *efp, size_t frag_idx, double *charge)
{
struct frag *frag;
double sum = 0.0;
size_t i;
assert(efp);
assert(charge);
assert(frag_idx < efp->n_frag);
frag = efp->frags + frag_idx;
for (i = 0; i < frag->n_atoms; i++)
sum += frag->atoms[i].znuc;
for (i = 0; i < frag->n_multipole_pts; i++)
sum += frag->multipole_pts[i].monopole;
*charge = sum;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_multiplicity(struct efp *efp, size_t frag_idx, int *mult)
{
assert(efp);
assert(mult);
assert(frag_idx < efp->n_frag);
*mult = efp->frags[frag_idx].multiplicity;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_multipole_count(struct efp *efp, size_t frag_idx, size_t *n_mult)
{
assert(efp);
assert(n_mult);
assert(frag_idx < efp->n_frag);
*n_mult = efp->frags[frag_idx].n_multipole_pts;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_mult_rank(struct efp *efp, size_t frag_idx, size_t mult_idx, size_t *rank)
{
assert(efp);
assert(rank);
assert(frag_idx < efp->n_frag);
assert(mult_idx < efp->frags[frag_idx].n_multipole_pts);
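/* Rank is the highest nonzero multipole order at this point:
   0 = monopole, 1 = dipole, 2 = quadrupole, 3 = octupole. */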
struct frag *frag = efp->frags + frag_idx;
struct multipole_pt *pt = frag->multipole_pts + mult_idx;
*rank = (size_t)-1; /* sentinel: no nonzero moments found */
for (size_t t = 0; t < 10; t++)
if (pt->octupole[t] != 0) {
*rank = 3;
return EFP_RESULT_SUCCESS;
}
for (size_t t = 0; t < 6; t++)
if (pt->quadrupole[t] != 0) {
*rank = 2;
return EFP_RESULT_SUCCESS;
}
if (pt->dipole.x != 0 || pt->dipole.y != 0 || pt->dipole.z != 0) {
*rank = 1;
return EFP_RESULT_SUCCESS;
}
if (pt->monopole != 0) {
*rank = 0;
return EFP_RESULT_SUCCESS;
}
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_multipole_count(struct efp *efp, size_t *n_mult)
{
size_t sum = 0;
assert(efp);
assert(n_mult);
for (size_t i = 0; i < efp->n_frag; i++)
sum += efp->frags[i].n_multipole_pts;
*n_mult = sum;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_multipole_coordinates(struct efp *efp, double *xyz)
{
assert(efp);
assert(xyz);
for (size_t i = 0; i < efp->n_frag; i++) {
struct frag *frag = efp->frags + i;
for (size_t j = 0; j < frag->n_multipole_pts; j++) {
*xyz++ = frag->multipole_pts[j].x;
*xyz++ = frag->multipole_pts[j].y;
*xyz++ = frag->multipole_pts[j].z;
}
}
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_multipole_values(struct efp *efp, double *mult)
{
assert(efp);
assert(mult);
for (size_t i = 0; i < efp->n_frag; i++) {
struct frag *frag = efp->frags + i;
for (size_t j = 0; j < frag->n_multipole_pts; j++) {
struct multipole_pt *pt = frag->multipole_pts + j;
*mult++ = pt->monopole;
*mult++ = pt->dipole.x;
*mult++ = pt->dipole.y;
*mult++ = pt->dipole.z;
for (size_t t = 0; t < 6; t++)
*mult++ = pt->quadrupole[t];
for (size_t t = 0; t < 10; t++)
*mult++ = pt->octupole[t];
}
}
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_induced_dipole_count(struct efp *efp, size_t frag_idx, size_t *n_dip)
{
assert(efp);
assert(n_dip);
assert(frag_idx < efp->n_frag);
*n_dip = efp->frags[frag_idx].n_polarizable_pts;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_induced_dipole_count(struct efp *efp, size_t *n_dip)
{
size_t sum = 0;
assert(efp);
assert(n_dip);
for (size_t i = 0; i < efp->n_frag; i++)
sum += efp->frags[i].n_polarizable_pts;
*n_dip = sum;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_induced_dipole_coordinates(struct efp *efp, double *xyz)
{
assert(efp);
assert(xyz);
for (size_t i = 0; i < efp->n_frag; i++) {
struct frag *frag = efp->frags + i;
for (size_t j = 0; j < frag->n_polarizable_pts; j++) {
struct polarizable_pt *pt = frag->polarizable_pts + j;
*xyz++ = pt->x;
*xyz++ = pt->y;
*xyz++ = pt->z;
}
}
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_induced_dipole_values(struct efp *efp, double *dip)
{
assert(efp);
assert(dip);
memcpy(dip, efp->indip, efp->n_polarizable_pts * sizeof(vec_t));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_induced_dipole_conj_values(struct efp *efp, double *dip)
{
assert(efp);
assert(dip);
memcpy(dip, efp->indipconj, efp->n_polarizable_pts * sizeof(vec_t));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_lmo_count(struct efp *efp, size_t frag_idx, size_t *n_lmo)
{
assert(efp != NULL);
assert(frag_idx < efp->n_frag);
assert(n_lmo != NULL);
*n_lmo = efp->frags[frag_idx].n_lmo;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_lmo_coordinates(struct efp *efp, size_t frag_idx, double *xyz)
{
struct frag *frag;
assert(efp != NULL);
assert(frag_idx < efp->n_frag);
assert(xyz != NULL);
frag = efp->frags + frag_idx;
if (frag->lmo_centroids == NULL) {
efp_log("no LMO centroids for fragment %s", frag->name);
return EFP_RESULT_FATAL;
}
memcpy(xyz, frag->lmo_centroids, frag->n_lmo * sizeof(vec_t));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_xrfit(struct efp *efp, size_t frag_idx, double *xrfit)
{
struct frag *frag;
assert(efp != NULL);
assert(frag_idx < efp->n_frag);
assert(xrfit != NULL);
frag = efp->frags + frag_idx;
if (frag->xrfit == NULL) {
efp_log("no XRFIT parameters for fragment %s", frag->name);
return EFP_RESULT_FATAL;
}
memcpy(xrfit, frag->xrfit, frag->n_lmo * 4 * sizeof(double));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT void
efp_shutdown(struct efp *efp)
{
if (efp == NULL)
return;
for (size_t i = 0; i < efp->n_frag; i++)
free_frag(efp->frags + i);
for (size_t i = 0; i < efp->n_lib; i++) {
free_frag(efp->lib[i]);
free(efp->lib[i]);
}
free(efp->frags);
free(efp->lib);
free(efp->grad);
free(efp->ptc);
free(efp->ptc_xyz);
free(efp->ptc_grad);
free(efp->indip);
free(efp->indipconj);
free(efp->ai_orbital_energies);
free(efp->ai_dipole_integrals);
free(efp->skiplist);
free(efp->fragment_field);
free(efp->pair_energies);
free(efp->symmlist);
free(efp);
}
EFP_EXPORT enum efp_result
efp_set_opts(struct efp *efp, const struct efp_opts *opts)
{
enum efp_result res;
assert(efp);
assert(opts);
if ((res = check_opts(opts))) {
efp_log("check_opts() failure");
return res;
}
efp->opts = *opts;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_opts(struct efp *efp, struct efp_opts *opts)
{
assert(efp);
assert(opts);
*opts = efp->opts;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT void
efp_opts_default(struct efp_opts *opts)
{
assert(opts);
memset(opts, 0, sizeof(*opts));
opts->terms = EFP_TERM_ELEC | EFP_TERM_POL | EFP_TERM_DISP |
EFP_TERM_XR | EFP_TERM_AI_ELEC | EFP_TERM_AI_POL;
}
EFP_EXPORT void
efp_set_error_log(void (*cb)(const char *))
{
efp_set_log_cb(cb);
}
EFP_EXPORT enum efp_result
efp_add_fragment(struct efp *efp, const char *name)
{
const struct frag *lib;
assert(efp);
assert(name);
if (efp->skiplist) {
efp_log("cannot add fragments after efp_prepare");
return EFP_RESULT_FATAL;
}
if ((lib = efp_find_lib(efp, name)) == NULL) {
efp_log("cannot find \"%s\" in any of .efp files", name);
return EFP_RESULT_UNKNOWN_FRAGMENT;
}
efp->n_frag++;
efp->frags = (struct frag *)realloc(efp->frags,
efp->n_frag * sizeof(struct frag));
if (efp->frags == NULL)
return EFP_RESULT_NO_MEMORY;
enum efp_result res;
struct frag *frag = efp->frags + efp->n_frag - 1;
if ((res = copy_frag(frag, lib))) {
efp_log("copy_frag() failure");
return res;
}
for (size_t a = 0; a < 3; a++) {
size_t size = frag->xr_wf_size * frag->n_lmo;
frag->xr_wf_deriv[a] = (double *)calloc(size, sizeof(double));
if (frag->xr_wf_deriv[a] == NULL)
return EFP_RESULT_NO_MEMORY;
}
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_skip_fragments(struct efp *efp, size_t i, size_t j, int value)
{
assert(efp);
assert(efp->skiplist); /* call efp_prepare first */
assert(i < efp->n_frag);
assert(j < efp->n_frag);
efp->skiplist[i * efp->n_frag + j] = value ? 1 : 0;
efp->skiplist[j * efp->n_frag + i] = value ? 1 : 0;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT struct efp *
efp_create(void)
{
struct efp *efp = (struct efp *)calloc(1, sizeof(struct efp));
if (efp == NULL)
return NULL;
efp_opts_default(&efp->opts);
return efp;
}
EFP_EXPORT enum efp_result
efp_set_electron_density_field_fn(struct efp *efp,
efp_electron_density_field_fn fn)
{
assert(efp);
efp->get_electron_density_field = fn;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_electron_density_field_user_data(struct efp *efp, void *user_data)
{
assert(efp);
efp->get_electron_density_field_user_data = user_data;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_count(struct efp *efp, size_t *n_frag)
{
assert(efp);
assert(n_frag);
*n_frag = efp->n_frag;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_name(struct efp *efp, size_t frag_idx, size_t size,
char *frag_name)
{
assert(efp);
assert(frag_name);
assert(frag_idx < efp->n_frag);
strncpy(frag_name, efp->frags[frag_idx].name, size);
if (size > 0)
frag_name[size - 1] = '\0'; /* strncpy does not NUL-terminate on truncation */
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_mass(struct efp *efp, size_t frag_idx, double *mass_out)
{
assert(efp);
assert(mass_out);
assert(frag_idx < efp->n_frag);
const struct frag *frag = efp->frags + frag_idx;
double mass = 0.0;
for (size_t i = 0; i < frag->n_atoms; i++)
mass += frag->atoms[i].mass;
*mass_out = mass;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_inertia(struct efp *efp, size_t frag_idx, double *inertia_out)
{
assert(efp);
assert(inertia_out);
assert(frag_idx < efp->n_frag);
/* center of mass is in origin and axes are principal axes of inertia */
const struct frag *frag = efp->frags[frag_idx].lib;
vec_t inertia = vec_zero;
for (size_t i = 0; i < frag->n_atoms; i++) {
const struct efp_atom *atom = frag->atoms + i;
inertia.x += atom->mass * (atom->y*atom->y + atom->z*atom->z);
inertia.y += atom->mass * (atom->x*atom->x + atom->z*atom->z);
inertia.z += atom->mass * (atom->x*atom->x + atom->y*atom->y);
}
inertia_out[0] = inertia.x;
inertia_out[1] = inertia.y;
inertia_out[2] = inertia.z;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_atom_count(struct efp *efp, size_t frag_idx, size_t *n_atoms)
{
assert(efp);
assert(n_atoms);
assert(frag_idx < efp->n_frag);
*n_atoms = efp->frags[frag_idx].n_atoms;
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_frag_atoms(struct efp *efp, size_t frag_idx, size_t size,
struct efp_atom *atoms)
{
struct frag *frag;
assert(efp);
assert(atoms);
assert(frag_idx < efp->n_frag);
assert(size >= efp->frags[frag_idx].n_atoms);
frag = efp->frags + frag_idx;
memcpy(atoms, frag->atoms, frag->n_atoms * sizeof(struct efp_atom));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT void
efp_torque_to_derivative(const double *euler, const double *torque,
double *deriv)
{
assert(euler);
assert(torque);
assert(deriv);
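/* Chain rule from torque about the space-fixed axes to derivatives
   with respect to Euler angles: the matrix applied below is the
   transpose of the angular-velocity Jacobian (assuming the z-x-z
   convention used by euler_to_matrix). */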
double tx = torque[0];
double ty = torque[1];
double tz = torque[2];
double sina = sin(euler[0]);
double cosa = cos(euler[0]);
double sinb = sin(euler[1]);
double cosb = cos(euler[1]);
deriv[0] = tz;
deriv[1] = cosa * tx + sina * ty;
deriv[2] = sinb * sina * tx - sinb * cosa * ty + cosb * tz;
}
EFP_EXPORT const char *
efp_banner(void)
{
static const char banner[] =
"LIBEFP ver. " LIBEFP_VERSION_STRING "\n"
"Copyright (c) 2012-2017 Ilya Kaliman\n"
"\n"
"Journal References:\n"
" - Kaliman and Slipchenko, JCC 2013.\n"
" DOI: http://dx.doi.org/10.1002/jcc.23375\n"
" - Kaliman and Slipchenko, JCC 2015.\n"
" DOI: http://dx.doi.org/10.1002/jcc.23772\n"
"\n"
"Project web site: https://github.com/libefp2/libefp/\n";
return banner;
}
EFP_EXPORT void
efp_print_banner(void)
{
puts(efp_banner());
}
EFP_EXPORT const char *
efp_result_to_string(enum efp_result res)
{
switch (res) {
case EFP_RESULT_SUCCESS:
return "Operation was successful.";
case EFP_RESULT_FATAL:
return "Fatal error has occurred.";
case EFP_RESULT_NO_MEMORY:
return "Insufficient memory.";
case EFP_RESULT_FILE_NOT_FOUND:
return "File not found.";
case EFP_RESULT_SYNTAX_ERROR:
return "Syntax error.";
case EFP_RESULT_UNKNOWN_FRAGMENT:
return "Unknown EFP fragment.";
case EFP_RESULT_POL_NOT_CONVERGED:
return "Polarization SCF procedure did not converge.";
}
assert(0);
return "Unknown result code.";
}
EFP_EXPORT enum efp_result
efp_get_pairwise_energy(struct efp *efp, struct efp_energy *pair_energies){
assert(efp);
assert(pair_energies);
memcpy(pair_energies, efp->pair_energies, efp->n_frag * sizeof(struct efp_energy));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_pairwise_energy(struct efp *efp, struct efp_energy *pair_energies)
{
assert(efp);
assert(pair_energies);
memcpy(efp->pair_energies, pair_energies, efp->n_frag * sizeof(struct efp_energy));
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_set_symmlist(struct efp *efp)
{
assert(efp);
assert(efp->symmlist);
assert(efp->skiplist);
if (efp->opts.symmetry == 0)
{
for (size_t i = 0; i < efp->n_frag; i++) {
efp->symmlist[i] = 0;
}
}
else if (efp->opts.symm_frag == EFP_SYMM_FRAG_FRAG) {
//printf("\n n_lib %d \n", efp->n_lib);
// this needs to be changed for list settings of symmetry!!!
efp->nsymm_frag = efp->n_lib;
char **unique_names = (char **)malloc(efp->n_lib * sizeof(char *));
if (unique_names == NULL)
return EFP_RESULT_NO_MEMORY;
for (size_t i = 0; i < efp->n_lib; i++) {
unique_names[i] = NULL;
}
int n_unique = 0;
for (size_t i = 0; i < efp->n_frag; i++) {
struct frag *frag = efp->frags + i;
for (size_t m = 0; m < efp->n_lib; m++) {
if (unique_names[m] != NULL && strcmp(frag->name, unique_names[m]) == 0) {
efp->symmlist[i] = m+1;
break;
}
}
if (efp->symmlist[i] == 0) // fragment name not among the unique names seen so far
{
unique_names[n_unique] = frag->name;
n_unique++;
efp->symmlist[i] = n_unique;
}
// printf("symm_list %d \n", efp->symmlist[i]);
}
// setup skiplist now...
for (size_t i = 0; i < efp->n_frag; i++) {
for (size_t j = 0; j < efp->n_frag; j++) {
efp_skip_fragments(efp, i, j, 1);
}
}
n_unique = 1;
//memset(efp->skiplist,true,efp->n_frag*efp->n_frag);
for (size_t i = 0; i < efp->n_frag; i++) {
// this is the first occurrence of the symmetry-unique fragment
if (efp->symmlist[i] == n_unique) {
for (size_t j = 0; j < efp->n_frag; j++) {
efp_skip_fragments(efp, i, j, 0);
}
n_unique ++;
}
}
free(unique_names);
}
else {
printf("\n DO NOT KNOW WHAT TO DO WITH THIS SYMMETRIC SYSTEM: SYMM_FRAG IS UNKNOWN \n");
}
/*
//printf("\n skiplist \n");
for (int i = 0; i < efp->n_frag; i++){
for (int j=0; j < efp->n_frag; j++){
printf(" %s ", efp->skiplist[i*efp->n_frag + j] ? "true" : "false");
}
}
*/
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_symmlist(struct efp *efp, size_t frag_idx, size_t *symm){
assert(efp);
assert(efp->symmlist);
*symm = efp->symmlist[frag_idx];
return EFP_RESULT_SUCCESS;
}
EFP_EXPORT enum efp_result
efp_get_nsymm_frag(struct efp *efp, size_t *nsymm_frag){
assert(efp);
assert(nsymm_frag);
*nsymm_frag = efp->nsymm_frag;
return EFP_RESULT_SUCCESS;
}
void
unique_symm_frag(struct efp *efp, size_t *unique_frag){
//printf("\n Symmetry-unique fragments \n");
size_t n = 0;
size_t i = 0;
do {
if (efp->symmlist[i] > n) {
unique_frag[n] = i;
//printf(" %zu ", unique_frag[n]);
n++;
}
i++;
} while (n < efp->nsymm_frag);
}
void
n_symm_frag(struct efp *efp, size_t *symm_frag) {
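/* Count fragments per symmetry class. Note: symmlist[i] is read for
   i < nsymm_frag, which assumes the first nsymm_frag fragments each
   represent a distinct class. */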
for (size_t i = 0; i < efp->nsymm_frag; i++) {
size_t counter = 0;
for (size_t j = 0; j < efp->n_frag; j++) {
if (efp->symmlist[i] == efp->symmlist[j])
counter++;
}
symm_frag[i] = counter;
// printf("\n symm_frag %d = %d", i, symm_frag[i]);
}
}
|
polybench.c | #define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default, slightly over 32 MB. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
static
double rtclock()
{
#if defined(POLYBENCH_TIME) || defined(POLYBENCH_GFLOPS)
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, NULL);
if (stat != 0)
printf ("Error return from gettimeofday: %d", stat);
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
#else
return 0;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
static
unsigned long long int rdtsc()
{
unsigned long long int ret = 0;
unsigned int cycles_lo;
unsigned int cycles_hi;
__asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
return ret;
}
#endif
void polybench_flush_cache()
{
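/* Sum over a calloc'ed buffer larger than the LLC so that timed kernels
   start from a cold cache; the assert keeps the reduction from being
   optimized away. */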
int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
double* flush = (double*) calloc (cs, sizeof(double));
int i;
double tmp = 0.0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:tmp) private(i)
#endif
for (i = 0; i < cs; i++)
tmp += flush[i];
assert (tmp <= 10.0);
free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
as root, and this works only for Linux kernels. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
sched_setscheduler (0, SCHED_FIFO, &schedParam);
}
void polybench_linux_standard_scheduler()
{
/* Restore to standard scheduler policy. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
static
void test_fail(char *file, int line, char *call, int retval)
{
char buf[128];
memset(buf, '\0', sizeof(buf));
if (retval != 0)
fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
else
{
fprintf (stdout,"%-40s SKIPPED\n", file);
fprintf (stdout,"Line # %d\n", line);
}
if (retval == PAPI_ESYS)
{
sprintf (buf, "System error in %s", call);
perror (buf);
}
else if (retval > 0)
fprintf (stdout,"Error: %s\n", call);
else if (retval == 0)
fprintf (stdout,"Error: %s\n", call);
else
{
char errstring[PAPI_MAX_STR_LEN];
PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
fprintf (stdout,"Error in %s: %s\n", call, errstring);
}
fprintf (stdout,"\n");
if (PAPI_is_initialized ())
PAPI_shutdown ();
exit (1);
}
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
if (omp_get_max_threads () < polybench_papi_counters_threadid)
polybench_papi_counters_threadid = omp_get_max_threads () - 1;
}
#pragma omp barrier
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
polybench_papi_eventset = PAPI_NULL;
if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
int k;
for (k = 0; _polybench_papi_eventlist[k]; ++k)
{
if ((retval =
PAPI_event_name_to_code (_polybench_papi_eventlist[k],
&(polybench_papi_eventlist[k])))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
}
polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
if (PAPI_is_initialized ())
PAPI_shutdown ();
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval = 1;
char descr[PAPI_MAX_STR_LEN];
PAPI_event_info_t evinfo;
PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
if (PAPI_add_event (polybench_papi_eventset,
polybench_papi_eventlist[evid]) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
return 0;
}
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
long_long values[1];
values[0] = 0;
if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_read", retval);
if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
polybench_papi_values[evid] = values[0];
if ((retval = PAPI_remove_event
(polybench_papi_eventset,
polybench_papi_eventlist[evid])) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
void polybench_papi_print()
{
int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num() == polybench_papi_counters_threadid)
{
#ifdef POLYBENCH_PAPI_VERBOSE
verbose = 1;
#endif
if (verbose)
printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
int evid;
for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
{
if (verbose)
printf ("%s=", _polybench_papi_eventlist[evid]);
printf ("%llu ", polybench_papi_values[evid]);
if (verbose)
printf ("\n");
}
printf ("\n");
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_fifo_scheduler ();
#endif
}
void polybench_timer_start()
{
polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_start = rtclock ();
#else
polybench_c_start = rdtsc ();
#endif
}
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_end = rtclock ();
#else
polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_standard_scheduler ();
#endif
}
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
if (polybench_program_total_flops == 0)
{
printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
}
else
printf ("%0.2lf\n",
(polybench_program_total_flops /
(double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
printf ("%Ld\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
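/* Illustrative usage (added; not part of the original harness). A benchmark
   built with -DPOLYBENCH_TIME would typically wrap its kernel as follows,
   where kernel_under_test() is a hypothetical function:

       polybench_timer_start ();   // flushes cache, may switch scheduler
       kernel_under_test ();
       polybench_timer_stop ();
       polybench_timer_print ();   // seconds, or cycles with the RDTSC timer
*/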
static
void *
xmalloc (size_t num)
{
void* cur = NULL;
int ret = posix_memalign (&cur, 32, num);
if (! cur || ret)
{
fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
exit (1);
}
return cur;
}
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
/// FIXME: detect overflow!
size_t val = n;
val *= elt_size;
void* ret = xmalloc (val);
return ret;
}
|
GB_binop__rdiv_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rdiv_int64
// A.*B function (eWiseMult): GB_AemultB__rdiv_int64
// A*D function (colscale): GB_AxD__rdiv_int64
// D*A function (rowscale): GB_DxB__rdiv_int64
// C+=B function (dense accum): GB_Cdense_accumB__rdiv_int64
// C+=b function (dense accum): GB_Cdense_accumb__rdiv_int64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_int64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_int64
// C=scalar+B GB_bind1st__rdiv_int64
// C=scalar+B' GB_bind1st_tran__rdiv_int64
// C=A+scalar GB_bind2nd__rdiv_int64
// C=A'+scalar GB_bind2nd_tran__rdiv_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 64)
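// Note (added): RDIV is "reverse division", z = y / x.  A rough plain-C
// sketch of what the GB_IDIV_SIGNED macro is assumed to guard against
// (the authoritative rules live in GB.h):
//
//   static inline int64_t rdiv_int64 (int64_t x, int64_t y)
//   {
//       if (x == 0)  return (y == 0) ? 0 : ((y < 0) ? INT64_MIN : INT64_MAX) ;
//       if (x == -1) return (-y) ;      // avoid the INT64_MIN / -1 trap
//       return (y / x) ;
//   }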
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_IDIV_SIGNED (y, x, 64) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT64 || GxB_NO_RDIV_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__rdiv_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__rdiv_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t bij = Bx [p] ;
Cx [p] = GB_IDIV_SIGNED (bij, x, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__rdiv_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
Cx [p] = GB_IDIV_SIGNED (y, aij, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 64) ; \
}
GrB_Info GB_bind1st_tran__rdiv_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 64) ; \
}
GrB_Info GB_bind2nd_tran__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB051-getthreadnum-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
omp_get_thread_num() is used to ensure serial semantics.
*/
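/*
  (Added note) Only the thread with omp_get_thread_num()==0 writes numThreads,
  and the main thread reads it only after the parallel region's implicit
  barrier, so there is no data race.
*/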
#include <omp.h>
#include <stdio.h>
int main()
{
  int numThreads = 0;
#pragma omp parallel
{
if ( omp_get_thread_num()==0 ) {
numThreads = omp_get_num_threads();
}
}
printf ("numThreads=%d\n", numThreads);
return 0;
}
|
cg_impl.c | #include "cg_impl.h"
//---------------------------------------------------------------------
// Floating point arrays here are named as in the spec discussion of
// the CG algorithm
//---------------------------------------------------------------------
void conj_grad(int colidx[],
int rowstr[],
double x[],
double z[],
double a[],
double p[],
double q[],
double r[],
double *rnorm)
{
int j, k;
int cgit, cgitmax = 25;
double d, sum, rho, rho0, alpha, beta;
rho = 0.0;
//---------------------------------------------------------------------
// Initialize the CG algorithm:
//---------------------------------------------------------------------
#pragma omp parallel for
for (j = 0; j < naa + 1; j++)
{
q[j] = 0.0;
z[j] = 0.0;
r[j] = x[j];
p[j] = r[j];
}
//---------------------------------------------------------------------
// rho = r.r
// Now, obtain the norm of r: First, sum squares of r elements locally...
//---------------------------------------------------------------------
for (j = 0; j < lastcol - firstcol + 1; j++)
{
rho = rho + r[j] * r[j];
}
//---------------------------------------------------------------------
//---->
// The conj grad iteration loop
//---->
//---------------------------------------------------------------------
for (cgit = 1; cgit <= cgitmax; cgit++)
{
//---------------------------------------------------------------------
// q = A.p
// The partition submatrix-vector multiply: use workspace w
//---------------------------------------------------------------------
//
// NOTE: this version of the multiply is actually (slightly: maybe 5%)
// faster on the sp2 on 16 nodes than is the unrolled-by-2 version
// below. On the Cray t3d, the reverse is true, i.e., the
// unrolled-by-two version is some 10% faster.
// The unrolled-by-8 version below is significantly faster
// on the Cray t3d - overall speed of code is 1.5 times faster.
#pragma omp parallel for private(k) firstprivate(sum)
for (j = 0; j < lastrow - firstrow + 1; j++)
{
sum = 0.0;
for (k = rowstr[j]; k < rowstr[j + 1]; k++)
{
sum = sum + a[k] * p[colidx[k]];
}
q[j] = sum;
}
//---------------------------------------------------------------------
// Obtain p.q
//---------------------------------------------------------------------
d = 0.0;
for (j = 0; j < lastcol - firstcol + 1; j++)
{
d = d + p[j] * q[j];
}
//---------------------------------------------------------------------
// Obtain alpha = rho / (p.q)
//---------------------------------------------------------------------
alpha = rho / d;
//---------------------------------------------------------------------
// Save a temporary of rho
//---------------------------------------------------------------------
rho0 = rho;
//---------------------------------------------------------------------
// Obtain z = z + alpha*p
// and r = r - alpha*q
//---------------------------------------------------------------------
rho = 0.0;
#pragma omp parallel for
for (j = 0; j < lastcol - firstcol + 1; j++)
{
z[j] = z[j] + alpha * p[j];
r[j] = r[j] - alpha * q[j];
}
//---------------------------------------------------------------------
// rho = r.r
// Now, obtain the norm of r: First, sum squares of r elements locally...
//---------------------------------------------------------------------
for (j = 0; j < lastcol - firstcol + 1; j++)
{
rho = rho + r[j] * r[j];
}
//---------------------------------------------------------------------
// Obtain beta:
//---------------------------------------------------------------------
beta = rho / rho0;
//---------------------------------------------------------------------
// p = r + beta*p
//---------------------------------------------------------------------
#pragma omp parallel for
for (j = 0; j < lastcol - firstcol + 1; j++)
{
p[j] = r[j] + beta * p[j];
}
} // end of do cgit=1,cgitmax
//---------------------------------------------------------------------
// Compute residual norm explicitly: ||r|| = ||x - A.z||
// First, form A.z
// The partition submatrix-vector multiply
//---------------------------------------------------------------------
sum = 0.0;
#pragma omp parallel for private(k) firstprivate(d)
for (j = 0; j < lastrow - firstrow + 1; j++)
{
d = 0.0;
for (k = rowstr[j]; k < rowstr[j + 1]; k++)
{
d = d + a[k] * z[colidx[k]];
}
r[j] = d;
}
//---------------------------------------------------------------------
// At this point, r contains A.z
//---------------------------------------------------------------------
for (j = 0; j < lastcol - firstcol + 1; j++)
{
d = x[j] - r[j];
sum = sum + d * d;
}
*rnorm = sqrt(sum);
}
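//---------------------------------------------------------------------
// (Added note) The loop above is the textbook CG recurrence:
//   alpha = rho / (p.q)
//   z     = z + alpha*p,   r = r - alpha*q
//   beta  = rho_new / rho_old
//   p     = r + beta*p
// with rho = r.r recomputed each iteration, and the final residual
// checked explicitly as ||x - A.z||.
//---------------------------------------------------------------------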
//---------------------------------------------------------------------
// generate the test problem for benchmark 6
// makea generates a sparse matrix with a
// prescribed sparsity distribution
//
// parameter type usage
//
// input
//
// n i number of cols/rows of matrix
// nz i nonzeros as declared array size
// rcond r*8 condition number
// shift r*8 main diagonal shift
//
// output
//
// a r*8 array for nonzeros
// colidx i col indices
// rowstr i row pointers
//
// workspace
//
// iv, arow, acol i
// aelt r*8
//---------------------------------------------------------------------
void makea(int n,
int nz,
double a[],
int colidx[],
int rowstr[],
int firstrow,
int lastrow,
int firstcol,
int lastcol,
int arow[],
int acol[][NONZER + 1],
double aelt[][NONZER + 1],
int iv[])
{
int iouter, ivelt, nzv, nn1;
int ivc[NONZER + 1];
double vc[NONZER + 1];
//---------------------------------------------------------------------
// nonzer is approximately (int(sqrt(nnza /n)));
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// nn1 is the smallest power of two not less than n
//---------------------------------------------------------------------
nn1 = 1;
do
{
nn1 = 2 * nn1;
} while (nn1 < n);
//---------------------------------------------------------------------
// Generate nonzero positions and save for the use in sparse.
//---------------------------------------------------------------------
for (iouter = 0; iouter < n; iouter++)
{
nzv = NONZER;
sprnvc(n, nzv, nn1, vc, ivc);
vecset(n, vc, ivc, &nzv, iouter + 1, 0.5);
arow[iouter] = nzv;
for (ivelt = 0; ivelt < nzv; ivelt++)
{
acol[iouter][ivelt] = ivc[ivelt] - 1;
aelt[iouter][ivelt] = vc[ivelt];
}
}
//---------------------------------------------------------------------
// ... make the sparse matrix from list of elements with duplicates
// (iv is used as workspace)
//---------------------------------------------------------------------
sparse(a, colidx, rowstr, n, nz, NONZER, arow, acol,
aelt, firstrow, lastrow,
iv, RCOND, SHIFT);
}
//---------------------------------------------------------------------
// rows range from firstrow to lastrow
// the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
//---------------------------------------------------------------------
void sparse(double a[],
int colidx[],
int rowstr[],
int n,
int nz,
int nozer,
int arow[],
int acol[][NONZER + 1],
double aelt[][NONZER + 1],
int firstrow,
int lastrow,
int nzloc[],
double rcond,
double shift)
{
int nrows;
//---------------------------------------------------
// generate a sparse matrix from a list of
// [col, row, element] triples
//---------------------------------------------------
int i, j, j1, j2, nza, k, kk, nzrow, jcol;
double size, scale, ratio, va;
logical cont40;
//---------------------------------------------------------------------
// how many rows of result
//---------------------------------------------------------------------
nrows = lastrow - firstrow + 1;
//---------------------------------------------------------------------
// ...count the number of triples in each row
//---------------------------------------------------------------------
for (j = 0; j < nrows + 1; j++)
{
rowstr[j] = 0;
}
for (i = 0; i < n; i++)
{
for (nza = 0; nza < arow[i]; nza++)
{
j = acol[i][nza] + 1;
rowstr[j] = rowstr[j] + arow[i];
}
}
rowstr[0] = 0;
for (j = 1; j < nrows + 1; j++)
{
rowstr[j] = rowstr[j] + rowstr[j - 1];
}
nza = rowstr[nrows] - 1;
//---------------------------------------------------------------------
// ... rowstr(j) now is the location of the first nonzero
// of row j of a
//---------------------------------------------------------------------
if (nza > nz)
{
printf("Space for matrix elements exceeded in sparse\n");
printf("nza, nzmax = %d, %d\n", nza, nz);
exit(EXIT_FAILURE);
}
//---------------------------------------------------------------------
// ... preload data pages
//---------------------------------------------------------------------
for (j = 0; j < nrows; j++)
{
for (k = rowstr[j]; k < rowstr[j + 1]; k++)
{
a[k] = 0.0;
colidx[k] = -1;
}
nzloc[j] = 0;
}
//---------------------------------------------------------------------
// ... generate actual values by summing duplicates
//---------------------------------------------------------------------
size = 1.0;
ratio = pow(rcond, (1.0 / (double)(n)));
for (i = 0; i < n; i++)
{
for (nza = 0; nza < arow[i]; nza++)
{
j = acol[i][nza];
scale = size * aelt[i][nza];
for (nzrow = 0; nzrow < arow[i]; nzrow++)
{
jcol = acol[i][nzrow];
va = aelt[i][nzrow] * scale;
//--------------------------------------------------------------------
// ... add the identity * rcond to the generated matrix to bound
// the smallest eigenvalue from below by rcond
//--------------------------------------------------------------------
if (jcol == j && j == i)
{
va = va + rcond - shift;
}
cont40 = false;
for (k = rowstr[j]; k < rowstr[j + 1]; k++)
{
if (colidx[k] > jcol)
{
//----------------------------------------------------------------
// ... insert colidx here orderly
//----------------------------------------------------------------
for (kk = rowstr[j + 1] - 2; kk >= k; kk--)
{
if (colidx[kk] > -1)
{
a[kk + 1] = a[kk];
colidx[kk + 1] = colidx[kk];
}
}
colidx[k] = jcol;
a[k] = 0.0;
cont40 = true;
break;
}
else if (colidx[k] == -1)
{
colidx[k] = jcol;
cont40 = true;
break;
}
else if (colidx[k] == jcol)
{
//--------------------------------------------------------------
// ... mark the duplicated entry
//--------------------------------------------------------------
nzloc[j] = nzloc[j] + 1;
cont40 = true;
break;
}
}
if (cont40 == false)
{
printf("internal error in sparse: i=%d\n", i);
exit(EXIT_FAILURE);
}
a[k] = a[k] + va;
}
}
size = size * ratio;
}
//---------------------------------------------------------------------
// ... remove empty entries and generate final results
//---------------------------------------------------------------------
for (j = 1; j < nrows; j++)
{
nzloc[j] = nzloc[j] + nzloc[j - 1];
}
for (j = 0; j < nrows; j++)
{
if (j > 0)
{
j1 = rowstr[j] - nzloc[j - 1];
}
else
{
j1 = 0;
}
j2 = rowstr[j + 1] - nzloc[j];
nza = rowstr[j];
for (k = j1; k < j2; k++)
{
a[k] = a[nza];
colidx[k] = colidx[nza];
nza = nza + 1;
}
}
for (j = 1; j < nrows + 1; j++)
{
rowstr[j] = rowstr[j] - nzloc[j - 1];
}
nza = rowstr[nrows] - 1;
}
//---------------------------------------------------------------------
// generate a sparse n-vector (v, iv)
// having nzv nonzeros
//
// In the original Fortran version, mark(i) is set to 1 if position i is
// nonzero; mark is all zero on entry and is reset to all zero before exit.
// This corrects a performance bug found by John G. Lewis, caused by
// reinitialization of mark on every one of the n calls to sprnvc.
// (In this C translation the duplicate check is a linear scan over iv[].)
//---------------------------------------------------------------------
void sprnvc(int n, int nz, int nn1, double v[], int iv[])
{
int nzv, ii, i;
double vecelt, vecloc;
nzv = 0;
while (nzv < nz)
{
vecelt = randlc(&tran, amult);
//---------------------------------------------------------------------
// generate an integer between 1 and n in a portable manner
//---------------------------------------------------------------------
vecloc = randlc(&tran, amult);
i = icnvrt(vecloc, nn1) + 1;
if (i > n)
continue;
//---------------------------------------------------------------------
// was this integer generated already?
//---------------------------------------------------------------------
logical was_gen = false;
for (ii = 0; ii < nzv; ii++)
{
if (iv[ii] == i)
{
was_gen = true;
break;
}
}
if (was_gen)
continue;
v[nzv] = vecelt;
iv[nzv] = i;
nzv = nzv + 1;
}
}
//---------------------------------------------------------------------
// scale a double precision number x in (0,1) by a power of 2 and chop it
//---------------------------------------------------------------------
int icnvrt(double x, int ipwr2)
{
return (int)(ipwr2 * x);
}
//---------------------------------------------------------------------
// set ith element of sparse vector (v, iv) with
// nzv nonzeros to val
//---------------------------------------------------------------------
void vecset(int n, double v[], int iv[], int *nzv, int i, double val)
{
int k;
logical set;
set = false;
for (k = 0; k < *nzv; k++)
{
if (iv[k] == i)
{
v[k] = val;
set = true;
}
}
if (set == false)
{
v[*nzv] = val;
iv[*nzv] = i;
*nzv = *nzv + 1;
}
}
void init(double *zeta)
{
int i, j, k;
firstrow = 0;
lastrow = NA - 1;
firstcol = 0;
lastcol = NA - 1;
naa = NA;
nzz = NZ;
//---------------------------------------------------------------------
// Initialize the random number generator
//---------------------------------------------------------------------
tran = 314159265.0;
amult = 1220703125.0;
*zeta = randlc(&tran, amult);
//---------------------------------------------------------------------
//
//---------------------------------------------------------------------
makea(naa, nzz, a, colidx, rowstr,
firstrow, lastrow, firstcol, lastcol,
arow,
(int(*)[NONZER + 1])(void *)acol,
(double(*)[NONZER + 1])(void *)aelt,
iv);
//---------------------------------------------------------------------
// Note: as a result of the above call to makea:
// values of j used in indexing rowstr go from 0 --> lastrow-firstrow
// values of colidx which are col indexes go from firstcol --> lastcol
// So:
// Shift the col index vals from actual (firstcol --> lastcol )
// to local, i.e., (0 --> lastcol-firstcol)
//---------------------------------------------------------------------
for (j = 0; j < lastrow - firstrow + 1; j++)
{
for (k = rowstr[j]; k < rowstr[j + 1]; k++)
{
colidx[k] = colidx[k] - firstcol;
}
}
//---------------------------------------------------------------------
// set starting vector to (1, 1, .... 1)
//---------------------------------------------------------------------
for (i = 0; i < NA + 1; i++)
{
x[i] = 1.0;
}
for (j = 0; j < lastcol - firstcol + 1; j++)
{
q[j] = 0.0;
z[j] = 0.0;
r[j] = 0.0;
p[j] = 0.0;
}
}
void iterate(double *zeta, int *it)
{
int j;
double rnorm;
double norm_temp1, norm_temp2;
conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
//---------------------------------------------------------------------
// zeta = shift + 1/(x.z)
// So, first: (x.z)
// Also, find norm of z
// So, first: (z.z)
//---------------------------------------------------------------------
norm_temp1 = 0.0;
norm_temp2 = 0.0;
for (j = 0; j < lastcol - firstcol + 1; j++)
{
norm_temp1 = norm_temp1 + x[j] * z[j];
norm_temp2 = norm_temp2 + z[j] * z[j];
}
norm_temp2 = 1.0 / sqrt(norm_temp2);
*zeta = SHIFT + 1.0 / norm_temp1;
if (*it == 1)
printf("\n iteration ||r|| zeta\n");
printf(" %5d %20.14E%20.13f\n", *it, rnorm, *zeta);
//---------------------------------------------------------------------
// Normalize z to obtain x
//---------------------------------------------------------------------
for (j = 0; j < lastcol - firstcol + 1; j++)
{
x[j] = norm_temp2 * z[j];
}
} |
gmapper.h | /*
* This file should contain extern declarations for global variables in gmapper.
* This is the only file that should be included by other modules (seeds, mapping, etc).
*/
#ifndef _GMAPPER_H
#define _GMAPPER_H
#ifdef __cplusplus
//extern "C" {
#endif
#include "../gmapper/gmapper-definitions.h"
#include "../common/debug.h"
#include "../common/util.h"
#include "../common/time_counter.h"
#include "../common/gen-st.h"
#undef EXTERN
#undef STATIC
#ifdef _MODULE_GMAPPER
#include "../gmapper/gmapper-defaults.h"
#define EXTERN(_type, _id, _init_val) _type _id = _init_val
#define STATIC(_type, _id, _init_val) static _type _id = _init_val
#else
#define EXTERN(_type, _id, _init_val) extern _type _id
#define STATIC(_type, _id, _init_val)
#endif
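/* Illustrative expansion (added): when _MODULE_GMAPPER is defined in exactly
 * one translation unit, a line such as
 *   EXTERN(int, num_threads, DEF_NUM_THREADS);
 * becomes
 *   int num_threads = DEF_NUM_THREADS;   // in the defining module
 *   extern int num_threads;              // everywhere else
 * so the same header both declares and defines the globals.
 */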
/* shrimp mode */
EXTERN(shrimp_mode_t, shrimp_mode, DEF_SHRIMP_MODE);
EXTERN(shrimp_args_t, shrimp_args, {});
/* thread control */
EXTERN(int, num_threads, DEF_NUM_THREADS);
EXTERN(int, chunk_size, DEF_CHUNK_SIZE);
EXTERN(int, not_used, 0);
/* parameters */
EXTERN(struct read_mapping_options_t *, unpaired_mapping_options[2], {});
EXTERN(int, n_unpaired_mapping_options[2], {});
EXTERN(struct readpair_mapping_options_t *, paired_mapping_options, NULL);
EXTERN(int, n_paired_mapping_options, 0);
EXTERN(int, mode_mirna, false);
EXTERN(double, window_len, DEF_WINDOW_LEN);
EXTERN(double, window_overlap, DEF_WINDOW_OVERLAP);
EXTERN(int, match_mode, 0);
EXTERN(int, num_outputs, DEF_NUM_OUTPUTS);
EXTERN(int, max_alignments, DEF_MAX_ALIGNMENTS);
EXTERN(int, num_tmp_outputs, 20 + DEF_NUM_OUTPUTS);
EXTERN(int, anchor_width, DEF_ANCHOR_WIDTH);
EXTERN(int, indel_taboo_len, DEF_INDEL_TABOO_LEN);
EXTERN(uint32_t, list_cutoff, DEF_LIST_CUTOFF);
EXTERN(bool, gapless_sw, DEF_GAPLESS_SW);
EXTERN(bool, hash_filter_calls, DEF_HASH_FILTER_CALLS);
EXTERN(int, longest_read_len, DEF_LONGEST_READ_LENGTH);
EXTERN(bool, trim, false);
EXTERN(int, trim_front, 0);
EXTERN(int, trim_end, 0);
EXTERN(bool, trim_first, true);
EXTERN(bool, trim_second, true);
EXTERN(bool, trim_illumina, false);
EXTERN(char *, save_file, NULL);
EXTERN(char *, load_file, NULL);
EXTERN(char *, save_mmap, NULL);
EXTERN(char *, load_mmap, NULL);
EXTERN(unsigned int, progress, DEF_PROGRESS);
EXTERN(bool, compute_mapping_qualities, true);
EXTERN(bool, no_qv_check, false);
//EXTERN(int, score_difference_mq_cutoff, 0);
EXTERN(bool, all_contigs, false);
EXTERN(bool, use_sanger_qvs, true);
EXTERN(int, qual_vector_offset, 0);
EXTERN(int, qual_delta, 33);
EXTERN(int, min_avg_qv, 10);
/* Flags */
EXTERN(bool, strata_flag, false); /* get only top scoring hits */
EXTERN(bool, Cflag, false); /* do complement only */
EXTERN(bool, Fflag, false); /* do positive (forward) only */
EXTERN(bool, Hflag, false); /* use hash table, not lookup */
EXTERN(bool, Pflag, false); /* pretty print results */
EXTERN(bool, Rflag, false); /* add read sequence to output*/
EXTERN(bool, Tflag, true); /* reverse sw full tie breaks */
EXTERN(bool, Dflag, false); /* print statistics for each thread */
EXTERN(bool, Eflag, true); /* output sam format */
EXTERN(bool, Xflag, false); /* print insert histogram */
EXTERN(bool, Yflag, false); /* print genome projection histogram */
EXTERN(bool, Vflag, true); /* automatic genome index trimming */
EXTERN(bool, Qflag, true); /* use fastq reads */
EXTERN(bool, Gflag, true); /* global alignment flag ! */
EXTERN(bool, Bflag, false); /* be like bfast - cs only! */
EXTERN(bool, SQFflag, false); /* discard low quality kmers */
EXTERN(bool, extra_sam_fields, false);
EXTERN(bool, single_best_mapping, false);
EXTERN(bool, improper_mappings, true);
EXTERN(bool, autodetect_input, true);
EXTERN(bool, ignore_qvs, false); /* if input is fastq, ignore qvs in analysis */
//EXTERN(bool, hack, false);
/* Scores */
EXTERN(int, match_score, DEF_LS_MATCH_SCORE);
EXTERN(int, mismatch_score, DEF_LS_MISMATCH_SCORE);
EXTERN(int, a_gap_open_score, DEF_LS_A_GAP_OPEN);
EXTERN(int, a_gap_extend_score, DEF_LS_A_GAP_EXTEND);
EXTERN(int, b_gap_open_score, DEF_LS_B_GAP_OPEN);
EXTERN(int, b_gap_extend_score, DEF_LS_B_GAP_EXTEND);
EXTERN(int, crossover_score, DEF_CS_XOVER_SCORE);
EXTERN(double, score_alpha, 0.0);
EXTERN(double, score_beta, 0.0);
EXTERN(double, pr_mismatch, 0.0);
EXTERN(double, pr_xover, 0.0);
EXTERN(double, pr_del_open, 0.0);
EXTERN(double, pr_del_extend, 0.0);
EXTERN(double, pr_ins_open, 0.0);
EXTERN(double, pr_ins_extend, 0.0);
EXTERN(double, window_gen_threshold, DEF_WINDOW_GEN_THRESHOLD);
EXTERN(double, sw_vect_threshold, DEF_SW_VECT_THRESHOLD);
EXTERN(double, sw_full_threshold, DEF_SW_FULL_THRESHOLD);
/* shrimp parameter/option parsing */
STATIC(struct option const, standard_options[], DEF_STANDARD_OPTIONS);
STATIC(struct option const, colour_space_options[], DEF_COLOUR_SPACE_OPTIONS);
STATIC(struct option const, letter_space_options[], DEF_LETTER_SPACE_OPTIONS);
STATIC(size_t const, standard_entries, sizeof(standard_options)/sizeof(struct option));
STATIC(size_t const, letter_entries, sizeof(letter_space_options)/sizeof(struct option));
STATIC(size_t const, colour_entries, sizeof(colour_space_options)/sizeof(struct option));
/* pairing mode */
EXTERN(int, pair_mode, DEF_PAIR_MODE);
EXTERN(int, min_insert_size, DEF_MIN_INSERT_SIZE);
EXTERN(int, max_insert_size, DEF_MAX_INSERT_SIZE);
EXTERN(double, insert_size_mean, DEF_INSERT_SIZE_MEAN);
EXTERN(double, insert_size_stddev, DEF_INSERT_SIZE_STDDEV);
EXTERN(llint, insert_histogram[100], {});
EXTERN(int, insert_histogram_bucket_size, 1);
EXTERN(int, insert_histogram_load, 100);
EXTERN(char *, reads_filename, NULL);
EXTERN(char *, left_reads_filename, NULL);
EXTERN(char *, right_reads_filename, NULL);
EXTERN(bool, single_reads_file, true);
STATIC(char const * const, pair_mode_string[5], DEF_PAIR_MODE_STRING);
EXTERN(bool, pair_reverse[5][2], DEF_PAIR_REVERSE);
/* seed management */
EXTERN(int, n_seeds, 0);
EXTERN(struct seed_type *, seed, NULL);
EXTERN(uint32_t * *, seed_hash_mask, NULL);
EXTERN(int, max_seed_span, 0);
EXTERN(int, min_seed_span, MAX_SEED_SPAN);
EXTERN(int, avg_seed_span, 0);
/* Thread output buffer */
EXTERN(char **, thread_output_buffer, NULL);
EXTERN(size_t *, thread_output_buffer_sizes, NULL);
EXTERN(char **, thread_output_buffer_filled, NULL);
EXTERN(unsigned int *, thread_output_buffer_chunk, NULL);
EXTERN(size_t, thread_output_buffer_initial, DEF_THREAD_OUTPUT_BUFFER_INITIAL);
EXTERN(size_t, thread_output_buffer_increment, DEF_THREAD_OUTPUT_BUFFER_INCREMENT);
EXTERN(size_t, thread_output_buffer_safety, DEF_THREAD_OUTPUT_BUFFER_SAFETY);
EXTERN(unsigned int, thread_output_heap_capacity, DEF_THREAD_OUTPUT_HEAP_CAPACITY);
/* SAM stuff */
EXTERN(FILE *, unaligned_reads_file, NULL);
EXTERN(FILE *, aligned_reads_file, NULL);
EXTERN(bool, sam_unaligned, false);
EXTERN(bool, half_paired, true); //output reads in paired mode that only have one mapping
EXTERN(bool, sam_r2, false);
EXTERN(char *, sam_header_filename, NULL);
EXTERN(char *, sam_read_group_name, NULL);
EXTERN(char *, sam_sample_name, NULL);
EXTERN(FILE *, sam_header_hd, NULL);
EXTERN(FILE *, sam_header_sq, NULL);
EXTERN(FILE *, sam_header_rg, NULL);
EXTERN(FILE *, sam_header_pg, NULL);
/* Statistics */
EXTERN(llint, nreads, 0);
EXTERN(llint, nreads_mod, 0);
EXTERN(llint, total_reads_matched, 0);
EXTERN(llint, total_pairs_matched, 0);
EXTERN(llint, total_reads_matched_conf, 0);
EXTERN(llint, total_pairs_matched_conf, 0);
EXTERN(llint, total_reads_dropped, 0);
EXTERN(llint, total_pairs_dropped, 0);
EXTERN(llint, total_single_matches, 0);
EXTERN(llint, total_paired_matches, 0);
EXTERN(llint, total_dup_single_matches, 0); /* number of duplicate hits */
EXTERN(llint, total_dup_paired_matches, 0);
EXTERN(llint, load_genome_usecs, 0);
EXTERN(llint, mapping_wallclock_usecs, 0);
/* per-thread counts and statistics */
//EXTERN(llint, read_handle_usecs, 0);
//EXTERN(llint, wait_ticks, 0);
//EXTERN(llint, anchor_list_ticks, 0);
//EXTERN(llint, region_counts_ticks, 0);
//EXTERN(llint, mp_region_counts_ticks, 0);
//EXTERN(llint, hit_list_ticks, 0);
//EXTERN(llint, pass1_ticks, 0);
//EXTERN(llint, get_vector_hits_ticks, 0);
//EXTERN(llint, pass2_ticks, 0);
//EXTERN(llint, duplicate_removal_ticks, 0);
//EXTERN(stat_t, anchor_list_init_size, 0);
//EXTERN(stat_t, n_big_gaps_anchor_list, 0);
//EXTERN(stat_t, n_anchors_discarded, 0);
EXTERN(int, anchor_list_big_gap, DEF_ANCHOR_LIST_BIG_GAP);
// thread-private globals
typedef struct tpg_t {
llint read_handle_usecs;
//llint wait_ticks;
time_counter wait_tc;
//llint anchor_list_ticks;
time_counter anchor_list_tc;
//llint region_counts_ticks;
time_counter region_counts_tc;
//llint mp_region_counts_ticks;
time_counter mp_region_counts_tc;
//llint hit_list_ticks;
time_counter hit_list_tc;
//llint pass1_ticks;
time_counter pass1_tc;
//llint get_vector_hits_ticks;
time_counter get_vector_hits_tc;
//llint pass2_ticks;
time_counter pass2_tc;
//llint duplicate_removal_ticks;
time_counter duplicate_removal_tc;
stat_t anchor_list_init_size;
stat_t n_big_gaps_anchor_list;
stat_t n_anchors_discarded;
} tpg_t;
EXTERN(tpg_t, tpg, {});
#pragma omp threadprivate(tpg)
EXTERN(count_t, mem_genomemap, {});
EXTERN(count_t, mem_small, {});
EXTERN(count_t, mem_thread_buffer, {});
EXTERN(count_t, mem_mapping, {});
EXTERN(count_t, mem_sw, {});
/* genome map */
EXTERN(uint32_t ***, genomemap, NULL);
EXTERN(uint32_t **, genomemap_len, NULL);
EXTERN(uint32_t *, contig_offsets, NULL); /* offset info for genome contigs */
EXTERN(char **, contig_names, NULL);
EXTERN(int, num_contigs, 0);
EXTERN(uint32_t **, genome_contigs, NULL); /* genome -- always in letter */
EXTERN(uint32_t **, genome_contigs_rc, NULL); /* reverse complements */
EXTERN(uint32_t **, genome_cs_contigs, NULL);
EXTERN(uint32_t **, genome_cs_contigs_rc, NULL);
EXTERN(int *, genome_initbp, NULL);
EXTERN(uint32_t *, genome_len, NULL);
EXTERN(bool, genome_is_rna, false); /* is genome RNA (has uracil)?*/
EXTERN(long long int, total_genome_size, 0);
EXTERN(gen_st, contig_offsets_gen_st, {});
EXTERN(ptr_and_sz *, genomemap_block, NULL);
EXTERN(ptr_and_sz, genome_contigs_block, {});
EXTERN(ptr_and_sz, genome_contigs_rc_block, {});
EXTERN(ptr_and_sz, genome_cs_contigs_block, {});
/* region handling */
EXTERN(bool, use_regions, DEF_USE_REGIONS);
EXTERN(int, region_bits, DEF_REGION_BITS);
EXTERN(int, region_overlap, DEF_REGION_OVERLAP);
EXTERN(int, n_regions, (1 << (32 - DEF_REGION_BITS)));
typedef uint16_t region_map_t;
EXTERN(region_map_t *, region_map[2][2], {});
EXTERN(int, region_map_id, 0);
EXTERN(int, region_map_id_bits, 13);
//EXTERN(int, region_map_max_count, ((1 << 8) - 1));
#pragma omp threadprivate(region_map, region_map_id)
/* contains inlined calls; uses gapless_sw and hash_filter_calls vars */
#include "../common/f1-wrapper.h"
//void hit_free_sfrp(struct read_hit *);
void read_free(struct read_entry *);
void read_free_hit_list(struct read_entry *);
void read_free_anchor_list(struct read_entry *);
void read_free_full(struct read_entry *);
/* pulled off the web; this may or may not be any good */
static inline uint32_t
hash(uint32_t a)
{
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
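/* (Added note) This is the widely circulated Jenkins-style 32-bit integer
   mix, commonly used to decorrelate nearby integer keys before indexing a
   hash table. */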
/* hash-based version of the kmer -> map index function for larger seeds */
static inline uint32_t
kmer_to_mapidx_hash(uint32_t *kmerWindow, int sn)
{
static uint32_t maxidx = ((uint32_t)1 << 2*HASH_TABLE_POWER) - 1;
uint32_t mapidx = 0;
int i;
assert(seed_hash_mask != NULL);
for (i = 0; i < BPTO32BW(max_seed_span); i++)
mapidx = hash((kmerWindow[i] & seed_hash_mask[sn][i]) ^ mapidx);
return mapidx & maxidx;
}
/*
* Compress the given kmer into an index in 'readmap' according to the seed.
* While not optimal, this is only about 20% of the spaced seed scan time.
*
* This is the original version for smaller seeds.
*
* XXX- This algorithm only considers bases 0-3, which implies overlap
* when we have other bases (mainly uracil, but also wobble codes).
* This won't affect sensitivity, but may cause extra S-W calls.
*/
static inline uint32_t
kmer_to_mapidx_orig(uint32_t *kmerWindow, int sn)
{
bitmap_type a = seed[sn].mask[0];
uint32_t mapidx = 0;
int i = 0;
do {
if ((a & 0x1) == 0x1) {
mapidx <<= 2;
mapidx |= ((kmerWindow[i/8] >> (i%8)*4) & 0x3);
}
a >>= 1;
i++;
} while (a != 0x0);
assert(mapidx < power(4, seed[sn].weight));
return mapidx;
}
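/* Worked example (added; values are hypothetical): with seed mask 0b1011
 * (span 4, weight 3), the bases at mask positions 0, 1 and 3 are packed into
 * mapidx as 2 bits each, so a weight-3 spaced seed yields a 6-bit index
 * (mapidx < 4^3 = 64, matching the assert above). */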
#define KMER_TO_MAPIDX(kmer, sn) (Hflag? kmer_to_mapidx_hash((kmer), (sn)) : kmer_to_mapidx_orig((kmer), (sn)))
/* get contig number from absolute index */
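/* (Added note) For fewer than 100 contigs this is a linear scan over
   contig_offsets; beyond that, the generalized search tree
   (gen_st_search) answers the lookup. */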
static inline void
get_contig_num(uint32_t idx, int * cn) {
if (num_contigs < 100)
{
*cn = 0;
while (*cn < num_contigs - 1
&& idx >= contig_offsets[*cn + 1])
(*cn)++;
}
else
{
/*
int l, r, m;
l = 0;
r = num_contigs;
while (l + 1 < r) {
m = (r + l)/2;
if (idx < contig_offsets[m])
r = m;
else
l = m;
}
*cn = l;
*/
*cn = gen_st_search(&contig_offsets_gen_st, idx);
}
assert(contig_offsets[*cn] <= idx && idx < contig_offsets[*cn] + genome_len[*cn]);
}
#ifdef ENABLE_LOW_QUALITY_FILTER
// #define AUTOMATICALLY_DISCARD_LOW_QUAL_POSITIONS
#define INDIVIDUAL_QUALITY_THRESHOLD 3
/* 3 -> 50% chance to be right*/
#define AVERAGE_QUALITY_THRESHOLD 6
/* 6 -> 75% chance to be right*/
#define TOP_QUALITY_CUTOFF 10
/* 10 -> 90% chance to be right */
#ifdef AUTOMATICALLY_DISCARD_LOW_QUAL_POSITIONS
#define UNTRUSTED_QUALITY (-128)
#else
#define UNTRUSTED_QUALITY 0
#endif
static inline void
read_quality_filter_preprocess (const char * original_qual, char * processed_qual)
{
int i, size = strlen(original_qual);
for (i = 0; i < size; ++i) {
processed_qual[i] = original_qual[i] - qual_delta;
processed_qual[i] = (processed_qual[i] >= TOP_QUALITY_CUTOFF) ? TOP_QUALITY_CUTOFF : (processed_qual[i] < INDIVIDUAL_QUALITY_THRESHOLD) ? UNTRUSTED_QUALITY : processed_qual[i];
}
}
static inline bool
is_low_quality_read_subsequence(const char * quality, const int position, const seed_type seed)
{
int i, subsequence_quality = 0;
if (!quality) {
return false;
}
for (i = 0; i < seed.span; ++i) {
subsequence_quality += bitmap_extract(seed.mask, 1, seed.span - i - 1) * MAX(quality[position + i], UNTRUSTED_QUALITY);
}
if (subsequence_quality <= 0) {
fprintf(stderr, "%d (pos %d, span %d) <<< ", subsequence_quality, position, seed.span);
for (i = 0; i < seed.span; ++i) {
fprintf (stderr, "%d ", quality[position + i]);
}
exit(1);
}
return (subsequence_quality >= AVERAGE_QUALITY_THRESHOLD * seed.weight);
}
#endif
#ifdef __cplusplus
//} /* extern "C" */
#endif
#endif
|
mediancut.c | /*
** © 2009-2018 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/
#include <stdlib.h>
#include <stddef.h>
#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"
#define index_of_channel(ch) (offsetof(f_pixel,ch)/sizeof(float))
static f_pixel averagepixels(unsigned int clrs, const hist_item achv[]);
struct box {
f_pixel color;
f_pixel variance;
double sum, total_error, max_error;
unsigned int ind;
unsigned int colors;
};
ALWAYS_INLINE static double variance_diff(double val, const double good_enough);
inline static double variance_diff(double val, const double good_enough)
{
val *= val;
if (val < good_enough*good_enough) return val*0.25;
return val;
}
/** Weighted per-channel variance of the box. It's used to decide which channel to split by */
static f_pixel box_variance(const hist_item achv[], const struct box *box)
{
f_pixel mean = box->color;
double variancea=0, variancer=0, varianceg=0, varianceb=0;
for(unsigned int i = 0; i < box->colors; ++i) {
const f_pixel px = achv[box->ind + i].acolor;
double weight = achv[box->ind + i].adjusted_weight;
variancea += variance_diff(mean.a - px.a, 2.0/256.0)*weight;
variancer += variance_diff(mean.r - px.r, 1.0/256.0)*weight;
varianceg += variance_diff(mean.g - px.g, 1.0/256.0)*weight;
varianceb += variance_diff(mean.b - px.b, 1.0/256.0)*weight;
}
return (f_pixel){
.a = variancea*(4.0/16.0),
.r = variancer*(7.0/16.0),
.g = varianceg*(9.0/16.0),
.b = varianceb*(5.0/16.0),
};
}
static double box_max_error(const hist_item achv[], const struct box *box)
{
f_pixel mean = box->color;
double max_error = 0;
for(unsigned int i = 0; i < box->colors; ++i) {
const double diff = colordifference(mean, achv[box->ind + i].acolor);
if (diff > max_error) {
max_error = diff;
}
}
return max_error;
}
ALWAYS_INLINE static double color_weight(f_pixel median, hist_item h);
static inline void hist_item_swap(hist_item *l, hist_item *r)
{
if (l != r) {
hist_item t = *l;
*l = *r;
*r = t;
}
}
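/* (Added note) qsort_pivot below returns a median-of-three pivot taken from
   positions 8, len/2 and len-1 for arrays of at least 32 items, and simply
   the middle element for shorter runs. */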
ALWAYS_INLINE static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len);
inline static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len)
{
if (len < 32) {
return len/2;
}
const unsigned int aidx=8, bidx=len/2, cidx=len-1;
const unsigned int a=base[aidx].tmp.sort_value, b=base[bidx].tmp.sort_value, c=base[cidx].tmp.sort_value;
return (a < b) ? ((b < c) ? bidx : ((a < c) ? cidx : aidx ))
: ((b > c) ? bidx : ((a < c) ? aidx : cidx ));
}
ALWAYS_INLINE static unsigned int qsort_partition(hist_item *const base, const unsigned int len);
inline static unsigned int qsort_partition(hist_item *const base, const unsigned int len)
{
unsigned int l = 1, r = len;
if (len >= 8) {
hist_item_swap(&base[0], &base[qsort_pivot(base,len)]);
}
const unsigned int pivot_value = base[0].tmp.sort_value;
while (l < r) {
if (base[l].tmp.sort_value >= pivot_value) {
l++;
} else {
while(l < --r && base[r].tmp.sort_value <= pivot_value) {}
hist_item_swap(&base[l], &base[r]);
}
}
l--;
hist_item_swap(&base[0], &base[l]);
return l;
}
/** quick select algorithm */
static void hist_item_sort_range(hist_item base[], unsigned int len, unsigned int sort_start)
{
for(;;) {
const unsigned int l = qsort_partition(base, len), r = l+1;
if (l > 0 && sort_start < l) {
len = l;
}
else if (r < len && sort_start > r) {
base += r; len -= r; sort_start -= r;
}
else break;
}
}
/** sorts the array so that the sum of weights on one side stays below halfvar; returns the edge between the <halfvar and >halfvar parts of the set */
static hist_item *hist_item_sort_halfvar(hist_item base[], unsigned int len, double *const lowervar, const double halfvar)
{
do {
const unsigned int l = qsort_partition(base, len), r = l+1;
// check if sum of left side is smaller than half,
// if it is, then it doesn't need to be sorted
unsigned int t = 0; double tmpsum = *lowervar;
while (t <= l && tmpsum < halfvar) tmpsum += base[t++].color_weight;
if (tmpsum < halfvar) {
*lowervar = tmpsum;
} else {
if (l > 0) {
hist_item *res = hist_item_sort_halfvar(base, l, lowervar, halfvar);
if (res) return res;
} else {
// End of left recursion. This will be executed in order from the first element.
*lowervar += base[0].color_weight;
if (*lowervar > halfvar) return &base[0];
}
}
if (len > r) {
base += r; len -= r; // tail-recursive "call"
} else {
*lowervar += base[r].color_weight;
return (*lowervar > halfvar) ? &base[r] : NULL;
}
} while(1);
}
static f_pixel get_median(const struct box *b, hist_item achv[]);
typedef struct {
unsigned int chan; float variance;
} channelvariance;
static int comparevariance(const void *ch1, const void *ch2)
{
return ((const channelvariance*)ch1)->variance > ((const channelvariance*)ch2)->variance ? -1 :
(((const channelvariance*)ch1)->variance < ((const channelvariance*)ch2)->variance ? 1 : 0);
}
/** Finds which channel needs to be sorted first and preprocesses achv for fast sorting */
static double prepare_sort(struct box *b, hist_item achv[])
{
/*
** Sort dimensions by their variance, and then sort colors first by dimension with highest variance
*/
channelvariance channels[4] = {
{index_of_channel(a), b->variance.a},
{index_of_channel(r), b->variance.r},
{index_of_channel(g), b->variance.g},
{index_of_channel(b), b->variance.b},
};
qsort(channels, 4, sizeof(channels[0]), comparevariance);
const unsigned int ind1 = b->ind;
const unsigned int colors = b->colors;
#pragma omp parallel for if (colors > 25000) \
schedule(static) default(none) shared(achv, channels)
for(unsigned int i=0; i < colors; i++) {
const float *chans = (const float *)&achv[ind1 + i].acolor;
// Only the first channel really matters. When trying median cut many times
// with different histogram weights, I don't want sort randomness to influence outcome.
achv[ind1 + i].tmp.sort_value = ((unsigned int)(chans[channels[0].chan]*65535.0)<<16) |
(unsigned int)((chans[channels[2].chan] + chans[channels[1].chan]/2.0 + chans[channels[3].chan]/4.0)*65535.0);
}
const f_pixel median = get_median(b, achv);
// box will be split to make color_weight of each side even
const unsigned int ind = b->ind, end = ind+b->colors;
double totalvar = 0;
#pragma omp parallel for if (end - ind > 15000) \
schedule(static) default(shared) reduction(+:totalvar)
for(unsigned int j=ind; j < end; j++) totalvar += (achv[j].color_weight = color_weight(median, achv[j]));
return totalvar / 2.0;
}
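/* Worked example (added; numbers are illustrative): if alpha has the highest
   variance, a pixel's sort_value packs its alpha channel into the top 16 bits
   and a fixed-weight blend of the other three channels into the low 16 bits,
   so ties on the primary channel still order deterministically. */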
/** finds median in unsorted set by sorting only minimum required */
static f_pixel get_median(const struct box *b, hist_item achv[])
{
const unsigned int median_start = (b->colors-1)/2;
hist_item_sort_range(&(achv[b->ind]), b->colors,
median_start);
if (b->colors&1) return achv[b->ind + median_start].acolor;
// technically the second color is not guaranteed to be sorted correctly
// but most of the time it is good enough to be useful
return averagepixels(2, &achv[b->ind + median_start]);
}
/*
** Find the best splittable box. -1 if no boxes are splittable.
*/
static int best_splittable_box(struct box bv[], unsigned int boxes, const double max_mse)
{
int bi=-1; double maxsum=0;
for(unsigned int i=0; i < boxes; i++) {
if (bv[i].colors < 2) {
continue;
}
// looks only at max variance, because it's only going to split by it
const double cv = MAX(bv[i].variance.r, MAX(bv[i].variance.g,bv[i].variance.b));
double thissum = bv[i].sum * MAX(bv[i].variance.a, cv);
if (bv[i].max_error > max_mse) {
thissum = thissum* bv[i].max_error/max_mse;
}
if (thissum > maxsum) {
maxsum = thissum;
bi = i;
}
}
return bi;
}
inline static double color_weight(f_pixel median, hist_item h)
{
float diff = colordifference(median, h.acolor);
return sqrt(diff) * (sqrt(1.0+h.adjusted_weight)-1.0);
}
static void set_colormap_from_boxes(colormap *map, struct box bv[], unsigned int boxes, hist_item *achv);
static void adjust_histogram(hist_item *achv, const struct box bv[], unsigned int boxes);
static double box_error(const struct box *box, const hist_item achv[])
{
f_pixel avg = box->color;
double total_error=0;
for (unsigned int i = 0; i < box->colors; ++i) {
total_error += colordifference(avg, achv[box->ind + i].acolor) * achv[box->ind + i].perceptual_weight;
}
return total_error;
}
static bool total_box_error_below_target(double target_mse, struct box bv[], unsigned int boxes, const histogram *hist)
{
target_mse *= hist->total_perceptual_weight;
double total_error=0;
for(unsigned int i=0; i < boxes; i++) {
// error is (re)calculated lazily
if (bv[i].total_error >= 0) {
total_error += bv[i].total_error;
}
if (total_error > target_mse) return false;
}
for(unsigned int i=0; i < boxes; i++) {
if (bv[i].total_error < 0) {
bv[i].total_error = box_error(&bv[i], hist->achv);
total_error += bv[i].total_error;
}
if (total_error > target_mse) return false;
}
return true;
}
static void box_init(struct box *box, const hist_item *achv, const unsigned int ind, const unsigned int colors, const double sum) {
box->ind = ind;
box->colors = colors;
box->sum = sum;
box->total_error = -1;
box->color = averagepixels(colors, &achv[ind]);
#pragma omp task if (colors > 5000)
box->variance = box_variance(achv, box);
#pragma omp task if (colors > 8000)
box->max_error = box_max_error(achv, box);
}
/*
** Here is the fun part, the median-cut colormap generator. This is based
** on Paul Heckbert's paper, "Color Image Quantization for Frame Buffer
** Display," SIGGRAPH 1982 Proceedings, page 297.
*/
LIQ_PRIVATE colormap *mediancut(histogram *hist, unsigned int newcolors, const double target_mse, const double max_mse, void* (*malloc)(size_t), void (*free)(void*))
{
hist_item *achv = hist->achv;
LIQ_ARRAY(struct box, bv, newcolors);
unsigned int boxes = 1;
/*
** Set up the initial box.
*/
#pragma omp parallel
#pragma omp single
{
double sum = 0;
for(unsigned int i=0; i < hist->size; i++) {
sum += achv[i].adjusted_weight;
}
#pragma omp taskgroup
{
box_init(&bv[0], achv, 0, hist->size, sum);
}
/*
** Main loop: split boxes until we have enough.
*/
while (boxes < newcolors) {
// first split boxes that exceed the quality limit (to reserve colors for things like the odd green pixel);
// later raise the limit so that large smooth areas/gradients also get colors.
const double current_max_mse = max_mse + (boxes/(double)newcolors)*16.0*max_mse;
const int bi = best_splittable_box(bv, boxes, current_max_mse);
if (bi < 0) {
break; /* ran out of colors! */
}
unsigned int indx = bv[bi].ind;
unsigned int clrs = bv[bi].colors;
/*
Classic implementation tries to get even number of colors or pixels in each subdivision.
Here, instead of popularity I use (sqrt(popularity)*variance) metric.
Each subdivision balances number of pixels (popular colors) and low variance -
boxes can be large if they have similar colors. Later boxes with high variance
will be more likely to be split.
Using the median rather than the mean as the expected value gives much better results.
*/
const double halfvar = prepare_sort(&bv[bi], achv);
double lowervar=0;
// hist_item_sort_halfvar sorts and sums lowervar at the same time
// returns item to break at …minus one, which does smell like an off-by-one error.
hist_item *break_p = hist_item_sort_halfvar(&achv[indx], clrs, &lowervar, halfvar);
unsigned int break_at = MIN(clrs-1, break_p - &achv[indx] + 1);
/*
** Split the box.
*/
double sm = bv[bi].sum;
double lowersum = 0;
for(unsigned int i=0; i < break_at; i++) lowersum += achv[indx + i].adjusted_weight;
#pragma omp taskgroup
{
box_init(&bv[bi], achv, indx, break_at, lowersum);
box_init(&bv[boxes], achv, indx + break_at, clrs - break_at, sm - lowersum);
}
++boxes;
if (total_box_error_below_target(target_mse, bv, boxes, hist)) {
break;
}
}
}
colormap *map = pam_colormap(boxes, malloc, free);
set_colormap_from_boxes(map, bv, boxes, achv);
adjust_histogram(achv, bv, boxes);
return map;
}
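/*
 Editorial usage sketch (hypothetical caller; histogram construction and
 the choice of MSE limits are assumed to happen elsewhere, and the values
 here are illustrative only):
*/
#if 0
#include <stdlib.h>
static colormap *example_quantize_to_256(histogram *hist)
{
// 256 target colors; target_mse = 0 disables the early exit and
// max_mse = 0.01 caps the per-box error
return mediancut(hist, 256, 0.0, 0.01, malloc, free);
}
#endif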
static void set_colormap_from_boxes(colormap *map, struct box* bv, unsigned int boxes, hist_item *achv)
{
/*
** Ok, we've got enough boxes. Now choose a representative color for
** each box. There are a number of possible ways to make this choice.
** One would be to choose the center of the box; this ignores any structure
** within the boxes. Another method would be to average all the colors in
** the box - this is the method specified in Heckbert's paper.
*/
for(unsigned int bi = 0; bi < boxes; ++bi) {
map->palette[bi].acolor = bv[bi].color;
/* store total color popularity (perceptual_weight is approximation of it) */
map->palette[bi].popularity = 0;
for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) {
map->palette[bi].popularity += achv[i].perceptual_weight;
}
}
}
/* record which colormap entry each histogram item most likely maps to (used later as part of the remapping feedback loop) */
static void adjust_histogram(hist_item *achv, const struct box* bv, unsigned int boxes)
{
for(unsigned int bi = 0; bi < boxes; ++bi) {
for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) {
achv[i].tmp.likely_colormap_index = bi;
}
}
}
static f_pixel averagepixels(unsigned int clrs, const hist_item achv[])
{
double r = 0, g = 0, b = 0, a = 0, sum = 0;
#pragma omp parallel for if (clrs > 25000) \
schedule(static) default(shared) reduction(+:a) reduction(+:r) reduction(+:g) reduction(+:b) reduction(+:sum)
for(unsigned int i = 0; i < clrs; i++) {
const f_pixel px = achv[i].acolor;
const double weight = achv[i].adjusted_weight;
sum += weight;
a += px.a * weight;
r += px.r * weight;
g += px.g * weight;
b += px.b * weight;
}
if (sum) {
a /= sum;
r /= sum;
g /= sum;
b /= sum;
}
assert(!isnan(r) && !isnan(g) && !isnan(b) && !isnan(a));
return (f_pixel){.r=r, .g=g, .b=b, .a=a};
}
|
GB_binop__bxor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__bxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__bxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int8)
// C=scalar+B GB (_bind1st__bxor_int8)
// C=scalar+B' GB (_bind1st_tran__bxor_int8)
// C=A+scalar GB (_bind2nd__bxor_int8)
// C=A'+scalar GB (_bind2nd_tran__bxor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_INT8 || GxB_NO_BXOR_INT8)
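// Editorial sketch (not generated code): with the macros above, the
// innermost eWise loop body reduces to a plain XOR on int8 values.
#if 0
static void example_ewise_body (int8_t *Cx, const int8_t *Ax,
const int8_t *Bx, int64_t p)
{
int8_t aij = Ax [p] ; // GB_GETA
int8_t bij = Bx [p] ; // GB_GETB
Cx [p] = (aij) ^ (bij) ; // GB_BINOP stored into GB_CX(p)
}
#endif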
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxor_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = (x) ^ (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxor_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = Ax [p] ;
Cx [p] = (aij) ^ (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
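// Aliasing note for the two bind kernels above: Cx may alias Ax or Bx
// because element p of Cx is computed only from element p of the input,
// so the in-place update is safe, including in the bitmap case.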
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x) ^ (aij) ; \
}
GrB_Info GB (_bind1st_tran__bxor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij) ^ (y) ; \
}
GrB_Info GB (_bind2nd_tran__bxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
stream.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
/* Local default (smaller than the upstream default of 10 million elements);
 * guarded so that -DSTREAM_ARRAY_SIZE on the compile line still overrides
 * it, as promised above, instead of colliding with a redefinition. */
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 32768
#endif
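/* Worked example of rule (1a) above: with the upstream default of
 * 10,000,000 elements and 8-byte doubles, each array is about 76.3 MiB
 * and the three arrays together need about 228.9 MiB. The local default
 * of 32768 elements needs only 3 * 256 KiB = 768 KiB, which fits in cache
 * on most machines, so treat its results as a smoke test rather than a
 * rules-conforming STREAM measurement. */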
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
/* Local default of 2 repetitions; guarded so that -DNTIMES on the compile
 * line still takes effect. Values <= 1 are bumped back to the upstream
 * default of 10. */
#ifndef NTIMES
# define NTIMES 2
#endif
#if NTIMES<=1
# undef NTIMES
# define NTIMES 10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
int
main()
{
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf(HLINE);
#ifdef N
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n",k);
#endif
/* Initialize the three arrays in parallel. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
#endif
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
#endif
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
return 0;
}
# define M 20
int
checktick()
{
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = mysecond();
while( ((t2=mysecond()) - t1) < 1.0E-6 )
;
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
minDelta = MIN(minDelta, MAX(Delta,0));
}
return(minDelta);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
void checkSTREAMresults ()
{
STREAM_TYPE aj,bj,cj,scalar;
STREAM_TYPE aSumErr,bSumErr,cSumErr;
STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
double epsilon;
ssize_t j;
int k,ierr,err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
aSumErr += abs(a[j] - aj);
bSumErr += abs(b[j] - bj);
cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
}
else {
printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
if (abs(aAvgErr/aj) > epsilon) {
err++;
printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(a[j]/aj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf(" For array a[], %d errors were found.\n",ierr);
}
if (abs(bAvgErr/bj) > epsilon) {
err++;
printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(b[j]/bj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf(" For array b[], %d errors were found.\n",ierr);
}
if (abs(cAvgErr/cj) > epsilon) {
err++;
printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(c[j]/cj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf(" For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
}
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
}
void tuned_STREAM_Add()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
}
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
GB_unop__ainv_int64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_int64_int64
// op(A') function: GB_unop_tran__ainv_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64)
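// Editorial sketch (not generated code): GB_CAST_OP(p,p) with the macros
// above is a same-type "cast" followed by negation.
#if 0
static void example_unop_body (int64_t *Cx, const int64_t *Ax, int64_t p)
{
int64_t aij = Ax [p] ; // GB_GETA
int64_t z = aij ; // GB_CAST (a no-op here: A and C types match)
Cx [p] = -z ; // GB_OP
}
#endif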
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__ainv_int64_int64
(
int64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = -z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__ainv_int64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3mm.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
#pragma omp parallel private (j, k) num_threads(#P3)
{
/* E := A*B */
#pragma omp for schedule(#P1, #P2)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
/* F := C*D */
#pragma omp for schedule(#P1, #P2)
for (i = 0; i < _PB_NJ; i++)
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
/* G := E*F */
#pragma omp for schedule(#P1, #P2)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
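/* Editorial build sketch (paths assume the usual PolyBench layout; the
 * #P1-#P3 tokens in kernel_3mm are autotuning placeholders and must be
 * substituted with a schedule kind, chunk size, and thread count before
 * the file compiles), e.g.:
 * sed -e 's/#P1/static/' -e 's/#P2/16/' -e 's/#P3/8/' 3mm.c > 3mm_fixed.c
 * gcc -O2 -fopenmp -I utilities utilities/polybench.c 3mm_fixed.c -o 3mm
 */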
|
rhs.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB BT code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
#include "timers.h"
void compute_rhs()
{
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
//kai: coarse per-loop progress markers assigned by the instrumentation
// below (writes from the worksharing loops are unsynchronized); they must
// stay declared even though the consistent_data() registration calls are
// disabled.
int k1,k2,k3,k4,k5,k6,k7,k8,k9,k10, k11;
/*
consistent_data(&k1, "int", 1);
consistent_data(&k2, "int", 1);
consistent_data(&k3, "int", 1);
consistent_data(&k4, "int", 1);
consistent_data(&k5, "int", 1);
consistent_data(&k6, "int", 1);
consistent_data(&k7, "int", 1);
consistent_data(&k8, "int", 1);
consistent_data(&k9, "int", 1);
consistent_data(&k10, "int", 1);
consistent_data(&k11, "int", 1);
*/
//kaii
// double (*us_t)[grid_points[1]][grid_points[0]]= (double(*)[grid_points[1]][grid_points[0]])us;
if (timeron) timer_start(t_rhs);
#pragma omp parallel default(shared) private(i,j,k,m,rho_inv,uijk,up1,um1,\
vijk,vp1,vm1,wijk,wp1,wm1)
{
//---------------------------------------------------------------------
// compute the reciprocal of density, and the kinetic energy,
// and the speed of sound.
//---------------------------------------------------------------------
#pragma omp for schedule(static) nowait
for (k = 0; k <= grid_points[2]-1; k++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (i = 0; i <= grid_points[0]-1; i++) {
rho_inv = 1.0/u[k][j][i][0];
rho_i[k][j][i] = rho_inv;
us[k][j][i] = u[k][j][i][1] * rho_inv;
vs[k][j][i] = u[k][j][i][2] * rho_inv;
ws[k][j][i] = u[k][j][i][3] * rho_inv;
square[k][j][i] = 0.5* (
u[k][j][i][1]*u[k][j][i][1] +
u[k][j][i][2]*u[k][j][i][2] +
u[k][j][i][3]*u[k][j][i][3] ) * rho_inv;
qs[k][j][i] = square[k][j][i] * rho_inv;
}
}
//kai
k1 = k;
}
//---------------------------------------------------------------------
// copy the exact forcing term to the right hand side; because
// this forcing term is known, we can store it on the whole grid
// including the boundary
//---------------------------------------------------------------------
#pragma omp for schedule(static)
for (k = 0; k <= grid_points[2]-1; k++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (i = 0; i <= grid_points[0]-1; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = forcing[k][j][i][m];
}
}
}
//kai
k2 = k;
}
#pragma omp master
if (timeron) timer_start(t_rhsx);
//---------------------------------------------------------------------
// compute xi-direction fluxes
//---------------------------------------------------------------------
#pragma omp for schedule(static) nowait
for (k = 1; k <= grid_points[2]-2; k++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
uijk = us[k][j][i];
up1 = us[k][j][i+1];
um1 = us[k][j][i-1];
rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 *
(u[k][j][i+1][0] - 2.0*u[k][j][i][0] +
u[k][j][i-1][0]) -
tx2 * (u[k][j][i+1][1] - u[k][j][i-1][1]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 *
(u[k][j][i+1][1] - 2.0*u[k][j][i][1] +
u[k][j][i-1][1]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[k][j][i+1][1]*up1 -
u[k][j][i-1][1]*um1 +
(u[k][j][i+1][4]- square[k][j][i+1]-
u[k][j][i-1][4]+ square[k][j][i-1])*
c2);
rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 *
(u[k][j][i+1][2] - 2.0*u[k][j][i][2] +
u[k][j][i-1][2]) +
xxcon2 * (vs[k][j][i+1] - 2.0*vs[k][j][i] +
vs[k][j][i-1]) -
tx2 * (u[k][j][i+1][2]*up1 -
u[k][j][i-1][2]*um1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 *
(u[k][j][i+1][3] - 2.0*u[k][j][i][3] +
u[k][j][i-1][3]) +
xxcon2 * (ws[k][j][i+1] - 2.0*ws[k][j][i] +
ws[k][j][i-1]) -
tx2 * (u[k][j][i+1][3]*up1 -
u[k][j][i-1][3]*um1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 *
(u[k][j][i+1][4] - 2.0*u[k][j][i][4] +
u[k][j][i-1][4]) +
xxcon3 * (qs[k][j][i+1] - 2.0*qs[k][j][i] +
qs[k][j][i-1]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[k][j][i+1][4]*rho_i[k][j][i+1] -
2.0*u[k][j][i][4]*rho_i[k][j][i] +
u[k][j][i-1][4]*rho_i[k][j][i-1]) -
tx2 * ( (c1*u[k][j][i+1][4] -
c2*square[k][j][i+1])*up1 -
(c1*u[k][j][i-1][4] -
c2*square[k][j][i-1])*um1 );
}
}
//---------------------------------------------------------------------
// add fourth order xi-direction dissipation
//---------------------------------------------------------------------
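// Interior points get the full fourth-order stencil (1,-4,6,-4,1)*dssp;
// the two columns nearest each boundary use the truncated variants below
// because u[] has no points beyond the boundary.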
for (j = 1; j <= grid_points[1]-2; j++) {
i = 1;
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m]- dssp *
( 5.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] +
u[k][j][i+2][m]);
}
i = 2;
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
(-4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] -
4.0*u[k][j][i+1][m] + u[k][j][i+2][m]);
}
}
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 3; i <= grid_points[0]-4; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] +
6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] +
u[k][j][i+2][m] );
}
}
}
for (j = 1; j <= grid_points[1]-2; j++) {
i = grid_points[0]-3;
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] +
6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] );
}
i = grid_points[0]-2;
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j][i-2][m] - 4.*u[k][j][i-1][m] +
5.*u[k][j][i][m] );
}
}
//kai
k3 = k;
}
#pragma omp master
{
if (timeron) timer_stop(t_rhsx);
if (timeron) timer_start(t_rhsy);
}
//---------------------------------------------------------------------
// compute eta-direction fluxes
//---------------------------------------------------------------------
#pragma omp for schedule(static)
for (k = 1; k <= grid_points[2]-2; k++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
vijk = vs[k][j][i];
vp1 = vs[k][j+1][i];
vm1 = vs[k][j-1][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 *
(u[k][j+1][i][0] - 2.0*u[k][j][i][0] +
u[k][j-1][i][0]) -
ty2 * (u[k][j+1][i][2] - u[k][j-1][i][2]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 *
(u[k][j+1][i][1] - 2.0*u[k][j][i][1] +
u[k][j-1][i][1]) +
yycon2 * (us[k][j+1][i] - 2.0*us[k][j][i] +
us[k][j-1][i]) -
ty2 * (u[k][j+1][i][1]*vp1 -
u[k][j-1][i][1]*vm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 *
(u[k][j+1][i][2] - 2.0*u[k][j][i][2] +
u[k][j-1][i][2]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[k][j+1][i][2]*vp1 -
u[k][j-1][i][2]*vm1 +
(u[k][j+1][i][4] - square[k][j+1][i] -
u[k][j-1][i][4] + square[k][j-1][i])
*c2);
rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 *
(u[k][j+1][i][3] - 2.0*u[k][j][i][3] +
u[k][j-1][i][3]) +
yycon2 * (ws[k][j+1][i] - 2.0*ws[k][j][i] +
ws[k][j-1][i]) -
ty2 * (u[k][j+1][i][3]*vp1 -
u[k][j-1][i][3]*vm1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 *
(u[k][j+1][i][4] - 2.0*u[k][j][i][4] +
u[k][j-1][i][4]) +
yycon3 * (qs[k][j+1][i] - 2.0*qs[k][j][i] +
qs[k][j-1][i]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[k][j+1][i][4]*rho_i[k][j+1][i] -
2.0*u[k][j][i][4]*rho_i[k][j][i] +
u[k][j-1][i][4]*rho_i[k][j-1][i]) -
ty2 * ((c1*u[k][j+1][i][4] -
c2*square[k][j+1][i]) * vp1 -
(c1*u[k][j-1][i][4] -
c2*square[k][j-1][i]) * vm1);
}
}
//---------------------------------------------------------------------
// add fourth order eta-direction dissipation
//---------------------------------------------------------------------
j = 1;
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m]- dssp *
( 5.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] +
u[k][j+2][i][m]);
}
}
j = 2;
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
(-4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] -
4.0*u[k][j+1][i][m] + u[k][j+2][i][m]);
}
}
for (j = 3; j <= grid_points[1]-4; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] +
6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] +
u[k][j+2][i][m] );
}
}
}
j = grid_points[1]-3;
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] +
6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] );
}
}
j = grid_points[1]-2;
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j-2][i][m] - 4.*u[k][j-1][i][m] +
5.*u[k][j][i][m] );
}
}
//kai
k4 = k;
}
#pragma omp master
{
if (timeron) timer_stop(t_rhsy);
if (timeron) timer_start(t_rhsz);
}
//---------------------------------------------------------------------
// compute zeta-direction fluxes
//---------------------------------------------------------------------
#pragma omp for schedule(static)
for (k = 1; k <= grid_points[2]-2; k++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
wijk = ws[k][j][i];
wp1 = ws[k+1][j][i];
wm1 = ws[k-1][j][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 *
(u[k+1][j][i][0] - 2.0*u[k][j][i][0] +
u[k-1][j][i][0]) -
tz2 * (u[k+1][j][i][3] - u[k-1][j][i][3]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 *
(u[k+1][j][i][1] - 2.0*u[k][j][i][1] +
u[k-1][j][i][1]) +
zzcon2 * (us[k+1][j][i] - 2.0*us[k][j][i] +
us[k-1][j][i]) -
tz2 * (u[k+1][j][i][1]*wp1 -
u[k-1][j][i][1]*wm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 *
(u[k+1][j][i][2] - 2.0*u[k][j][i][2] +
u[k-1][j][i][2]) +
zzcon2 * (vs[k+1][j][i] - 2.0*vs[k][j][i] +
vs[k-1][j][i]) -
tz2 * (u[k+1][j][i][2]*wp1 -
u[k-1][j][i][2]*wm1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 *
(u[k+1][j][i][3] - 2.0*u[k][j][i][3] +
u[k-1][j][i][3]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[k+1][j][i][3]*wp1 -
u[k-1][j][i][3]*wm1 +
(u[k+1][j][i][4] - square[k+1][j][i] -
u[k-1][j][i][4] + square[k-1][j][i])
*c2);
rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 *
(u[k+1][j][i][4] - 2.0*u[k][j][i][4] +
u[k-1][j][i][4]) +
zzcon3 * (qs[k+1][j][i] - 2.0*qs[k][j][i] +
qs[k-1][j][i]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[k+1][j][i][4]*rho_i[k+1][j][i] -
2.0*u[k][j][i][4]*rho_i[k][j][i] +
u[k-1][j][i][4]*rho_i[k-1][j][i]) -
tz2 * ( (c1*u[k+1][j][i][4] -
c2*square[k+1][j][i])*wp1 -
(c1*u[k-1][j][i][4] -
c2*square[k-1][j][i])*wm1);
}
}
//kai
k5 = k;
}
//---------------------------------------------------------------------
// add fourth order zeta-direction dissipation
//---------------------------------------------------------------------
k = 1;
#pragma omp for schedule(static) nowait
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m]- dssp *
( 5.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] +
u[k+2][j][i][m]);
}
}
//kai
k6 = j;
}
k = 2;
#pragma omp for schedule(static) nowait
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
(-4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] -
4.0*u[k+1][j][i][m] + u[k+2][j][i][m]);
}
}
//kai
k7 = j;
}
#pragma omp for schedule(static) nowait
for (k = 3; k <= grid_points[2]-4; k++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] +
6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] +
u[k+2][j][i][m] );
}
}
}
//kai
k8 = k;
}
k = grid_points[2]-3;
#pragma omp for schedule(static) nowait
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] +
6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] );
}
}
//kai
k9 = j;
}
k = grid_points[2]-2;
#pragma omp for schedule(static)
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k-2][j][i][m] - 4.*u[k-1][j][i][m] +
5.*u[k][j][i][m] );
}
}
//kai
k10 = j;
}
#pragma omp master
if (timeron) timer_stop(t_rhsz);
#pragma omp for schedule(static) nowait
for (k = 1; k <= grid_points[2]-2; k++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] * dt;
}
}
}
//kai
k11 = k;
}
} //end parallel
if (timeron) timer_stop(t_rhs);
}
|
declare_reduction_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-passes | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix=CHECK-LOAD %s
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes -fopenmp-version=45
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes -fopenmp-version=45 | FileCheck --check-prefixes=CHECK-LOAD,OMP45-LOAD %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes
// RUN: %clang_cc1 -fopenmp-simd -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK: [[SSS_INT:.+]] = type { i32 }
// CHECK-LOAD: [[SSS_INT:.+]] = type { i32 }
// CHECK-DAG: [[SSS_INIT:@.+]] = private constant %struct.SSS zeroinitializer
// CHECK-DAG: [[INT_INIT:@.+]] = private constant i32 0
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
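// This user-defined reduction deliberately redefines '+' for int and char
// with a combiner that multiplies, so the combiner bodies checked below
// contain 'mul nsw i32' rather than an add.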
// CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1)
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: store i32 [[MUL]], i32*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1)
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: store i32 [[MUL]], i32*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1)
// CHECK: sext i8
// CHECK: sext i8
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-NEXT: store i8 [[TRUNC]], i8*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1)
// CHECK-LOAD: sext i8
// CHECK-LOAD: sext i8
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
#pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = 15 + omp_orig)
// CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1)
// CHECK: [[ADD:%.+]] = fadd float
// CHECK-NEXT: store float [[ADD]], float*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1)
// CHECK: [[ADD:%.+]] = fadd float 1.5
// CHECK-NEXT: store float [[ADD]], float*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1)
// CHECK-LOAD: [[ADD:%.+]] = fadd float
// CHECK-LOAD-NEXT: store float [[ADD]], float*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1)
// CHECK-LOAD: [[ADD:%.+]] = fadd float 1.5
// CHECK-LOAD-NEXT: store float [[ADD]], float*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
struct SSS {
int field;
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1)
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: store i32 [[MUL]], i32*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1)
// CHECK: sext i8
// CHECK: sext i8
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-NEXT: store i8 [[TRUNC]], i8*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
};
void init(struct SSS *priv, struct SSS orig);
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
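// The initializer clause runs init(&omp_priv, omp_orig), so in addition to
// the memcpy-based combiner, codegen emits an initializer function that
// calls @init, as the CHECK lines below verify.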
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LABEL: @main
// CHECK-LOAD-LABEL: @main
int main() {
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
{
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
}
return 0;
}
// OMP45-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1)
// OMP45-LOAD: [[MUL:%.+]] = mul nsw i32
// OMP45-LOAD-NEXT: store i32 [[MUL]], i32*
// OMP45-LOAD-NEXT: ret void
// OMP45-LOAD-NEXT: }
// OMP45-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1)
// OMP45-LOAD: sext i8
// OMP45-LOAD: sext i8
// OMP45-LOAD: [[MUL:%.+]] = mul nsw i32
// OMP45-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// OMP45-LOAD-NEXT: store i8 [[TRUNC]], i8*
// OMP45-LOAD-NEXT: ret void
// OMP45-LOAD-NEXT: }
// CHECK-LABEL: bar
struct SSS ss;
int in;
void bar() {
// CHECK: [[SS_PRIV:%.+]] = alloca %struct.SSS,
// CHECK: [[IN_PRIV:%.+]] = alloca i32,
// CHECK: [[BC:%.+]] = bitcast %struct.SSS* [[SS_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{64|32}}(i8* {{.*}}[[BC]], i8* {{.*}}bitcast (%struct.SSS* [[SSS_INIT]] to i8*), i{{64|32}} 4, i1 false)
// CHECK: [[IN_VAL:%.+]] = load i32, i32* [[INT_INIT]],
// CHECK: store i32 [[IN_VAL]], i32* [[IN_PRIV]],
// CHECK: call void @__kmpc_for_static_init_4(
#pragma omp declare reduction(+ \
: struct SSS \
: omp_out = omp_in)
#pragma omp declare reduction(+ \
: int \
: omp_out = omp_in)
#pragma omp for reduction(+ \
: ss, in)
for (int i = 0; i < 10; ++i)
;
}
#endif
|
GB_binop__islt_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__islt_uint16
// A.*B function (eWiseMult): GB_AemultB__islt_uint16
// A*D function (colscale): GB_AxD__islt_uint16
// D*A function (rowscale): GB_DxB__islt_uint16
// C+=B function (dense accum): GB_Cdense_accumB__islt_uint16
// C+=b function (dense accum): GB_Cdense_accumb__islt_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_uint16
// C=scalar+B GB_bind1st__islt_uint16
// C=scalar+B' GB_bind1st_tran__islt_uint16
// C=A+scalar GB_bind2nd__islt_uint16
// C=A'+scalar GB_bind2nd_tran__islt_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij < bij)
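// A minimal usage sketch, kept disabled like the #if 0 block further below
// since it is not part of the generated kernel. It assumes a standard
// SuiteSparse:GraphBLAS build: the user-level GrB_Matrix_eWiseAdd_BinaryOp
// call with the built-in GxB_ISLT_UINT16 operator is one entry point that
// can dispatch to GB_AaddB__islt_uint16 below.
#if 0
#include "GraphBLAS.h"
static GrB_Info example (GrB_Matrix C, GrB_Matrix A, GrB_Matrix B)
{
    // C = A "+" B with ISLT: cij = (aij < bij) where both entries exist;
    // an entry present in only one matrix is copied (eWiseAdd semantics).
    return (GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GxB_ISLT_UINT16,
        A, B, NULL)) ;
}
#endif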
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
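// ISLT is "is less than": z is 1 when x < y and 0 otherwise, stored in the
// operand type (uint16_t here) rather than as a boolean result.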
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT16 || GxB_NO_ISLT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
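// This kernel is disabled for ISLT: the dense C+=A+B path exists only for
// the accumulators listed in the comment below, which is why the table at
// the top of this file gives (none) for the dense ewise3 accum function.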
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__islt_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
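// GB_FREE_ALL releases the per-task slice workspace (the pstart, kfirst,
// and klast arrays for M, A, and B) used while executing the eWiseAdd and
// eWiseMult templates below.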
GrB_Info GB_AaddB__islt_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__islt_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
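// Bb is B's bitmap (NULL unless B is held in bitmap form); GBB (Bb, p) is
// true when entry p is present, so the loop below skips absent entries.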
GrB_Info GB_bind1st__islt_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__islt_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB_bind1st_tran__islt_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB_bind2nd_tran__islt_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|