source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
transpose.c |
/*----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
/*----------------------------------------------------------------*/
#include "transpose.h"
/*----------------------------------------------------------------*/
/*
 * Swap one block_size x block_size tile of X1 with the transposed tile of X2,
 * element by element.  fftw_complex is a double[2]: [0] = real, [1] = imag.
 *
 * i, j    - row/column origin of the tile within the full n x n matrix; used
 *           only to clamp the tile at the matrix edge (n-i, n-j) and for the
 *           verbose trace output.
 * n       - leading dimension (row stride) of the matrix.
 * verbosity - when nonzero, print a per-element trace with the thread id.
 *
 * NOTE(review): if X1 == X2 (a diagonal tile), the full p,q sweep swaps each
 * (p,q)/(q,p) pair twice, leaving the tile unchanged — confirm the caller
 * compensates for diagonal tiles.
 */
void hcl_local_transpose_scalar_block(
fftw_complex* X1,
fftw_complex* X2,
const int i, const int j,
const int n,
const int block_size,
const unsigned int verbosity)
{
int p, q;
/* Clamp the tile to min(remaining rows/cols, block_size) at the edges */
for (p = 0; p < min(n-i,block_size); p++) {
for (q = 0; q < min(n-j,block_size); q++) {
if (verbosity)
printf(
"%d: i %d, j %d, p %d, q %d, index1 %d index2 %d\n",
omp_get_thread_num(),
i, j,
p, q,
i*n+j + p*n+q, j*n+i + q*n+p);
/* Three-step swap of the complex values X1[p][q] <-> X2[q][p] */
double tmpr = X1[p*n+q][0];
double tmpi = X1[p*n+q][1];
X1[p*n+q][0] = X2[q*n+p][0];
X1[p*n+q][1] = X2[q*n+p][1];
X2[q*n+p][0] = tmpr;
X2[q*n+p][1] = tmpi;
}
}
}
/*
 * Blocked in-place transpose of the n x n region of X beginning at element
 * offset `start`, parallelized over tiles with an OpenMP parallel-for using
 * nt threads.  `end` bounds both tile loops; each tile is block_size
 * elements on a side.  The `end` parameter is used for both loop bounds;
 * `start` is applied only as a base offset into X.
 *
 * NOTE(review): the inner loop starts at j = 0 rather than j = i, so both
 * (i,j) and (j,i) tiles are visited; since the per-tile helper performs a
 * full swap, each off-diagonal tile pair appears to be swapped twice (which
 * undoes the swap).  Verify the intended semantics before relying on this
 * routine for a one-shot transpose.
 */
void hcl_transpose_block(
fftw_complex* X,
const int start, const int end,
const int n,
const unsigned int nt,
const int block_size,
const unsigned int verbosity)
{
int i, j;
/* Each thread takes whole tiles; i is the parallel loop index */
#pragma omp parallel for shared(X) private(i, j) num_threads(nt)
for (i = 0; i < end; i += block_size) {
for (j = 0; j < end; j += block_size) {
if (verbosity)
printf(
"%d: i %d, j %d\n",
omp_get_thread_num(), i, j);
hcl_local_transpose_scalar_block(
&X[start + i*n + j],
&X[start + j*n + i], i, j, n, block_size, verbosity);
}
}
}
/*----------------------------------------------------------------*/
|
main-omp.c | /* SPMD Single Program Multiple Data
* TransDNA com openMP
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "transcription.h"
#include "io.h"
// Constantes
#define TAMANHO_CODON 3
#define NUM_THREADS 4 // Constante para numero Threads de ressalva
/*
 * SPMD DNA transcription with OpenMP: read a DNA chain, split it into
 * codons, transcribe each codon to RNA and then to an amino acid in
 * parallel, and write the combined table to an output file.
 * Returns 0 on success, 1 on allocation failure.
 */
int main(int argc, char** argv) {
// Variables shared by all threads
double tIni, tFim, tExecucao; // Wall-clock timing
int codonsPorThread; // Number of codons each thread is responsible for
char** codonsDNA; // Array of DNA codons
char** codonsRNA; // Array of RNA codons
char** aminoacidos; // Array of amino acids
int qtCodons; // Number of codons in the original DNA chain
/*
 * TODO Uncomment this block if the default `export` does not work as described in the README
 * int qtThreads = NUM_THREADS; // Total number of threads (set by the user in the terminal)
 * omp_set_num_threads(NUM_THREADS);
 */
int qtThreads = omp_get_max_threads(); // TODO Comment out this declaration if the default `export` does not work as described in the README
tIni = omp_get_wtime(); // Start time
// Read the chain from the input file and find the starting point for transcription
char *cadeiaDNAoriginal = ler("dna7.txt"); // TODO Change to read the desired DNA file
printf("Processo: LEITURA NO ARQUIVO CONCLUIDA ");
char *cadeiaDNA = getCistron(cadeiaDNAoriginal);
int tamanhoCadeiaDNA = strlen(cadeiaDNA);
// Partition the chain into codons (substrings of length 3)
codonsDNA = split(cadeiaDNA, TAMANHO_CODON);
qtCodons = tamanhoCadeiaDNA / TAMANHO_CODON;
codonsPorThread = qtCodons/qtThreads;
printf("\nProcesso: TAMANHO DA CADEIA LIDA = %lu", strlen(cadeiaDNAoriginal));
printf("\nProcesso: TAMANHO DO CISTRON = %i", tamanhoCadeiaDNA);
printf("\nProcesso: TOTAL DE CODONS = %i", qtCodons);
printf("\nProcesso: CODONS POR THREAD = %i", codonsPorThread);
free(cadeiaDNAoriginal);
free(cadeiaDNA);
// Allocate the arrays that will hold the RNA codons and amino acids
codonsRNA = malloc(qtCodons * sizeof(char *));
aminoacidos = malloc(qtCodons * sizeof(char *));
// BUG FIX: the original code never checked these allocations
if (codonsRNA == NULL || aminoacidos == NULL) {
fprintf(stderr, "\nErro: falha na alocacao de memoria\n");
return 1;
}
// Start of the parallel section (threads)
#pragma omp parallel
{
// Per-thread variables
int idThread = omp_get_thread_num(); // Thread ID
int i; // Loop index
int inicioAreaThread = codonsPorThread * idThread;
int fimAreaThread = codonsPorThread * (idThread + 1);
// The last thread absorbs the remainder when qtThreads does not divide qtCodons
if((idThread == qtThreads-1) && (qtCodons % qtThreads != 0)){
fimAreaThread = fimAreaThread + (qtCodons % qtThreads);
}
// Report progress to the user
printf("\nThread %i: INICIOU ", idThread);
printf("\nThread %i: CODON INI %i", idThread, inicioAreaThread);
printf("\nThread %i: CODON FIM %i", idThread, fimAreaThread-1);
// Each thread processes its own slice and writes into the shared arrays
// (slices are disjoint, so no synchronization is needed)
for (i = inicioAreaThread; i < fimAreaThread; i++) {
codonsRNA[i] = transcription(codonsDNA[i], TAMANHO_CODON);
}
for (i = inicioAreaThread; i < fimAreaThread; i++) {
aminoacidos[i] = aminoacids(codonsRNA[i], TAMANHO_CODON);
}
} // End of the parallel section
// Show the results to the user and build the final string for the output file
int i;
char *resultadoArquivo = malloc(tamanhoCadeiaDNA * 7 * sizeof(char));
if (resultadoArquivo == NULL) {
fprintf(stderr, "\nErro: falha na alocacao de memoria\n");
return 1;
}
// BUG FIX: strcat on an uninitialized malloc'd buffer is undefined
// behavior; make it an empty string first so the first strcat is valid.
resultadoArquivo[0] = '\0';
strcat(resultadoArquivo, ".:RESULTADOS:.\nDNA RNA AMINO");
printf(COR_AZUL "\n .:RESULTADOS:. " COR_PADRAO);
printf(COR_AZUL "\n DNA RNA AMINO " COR_PADRAO);
for (i = 0; i < qtCodons; i++) {
printf(COR_AZUL "\n %s %s %s " COR_PADRAO, codonsDNA[i], codonsRNA[i], aminoacidos[i]);
char *novaLinha = malloc(TAMANHO_CODON * 7 * sizeof(char));
if (novaLinha == NULL) {
fprintf(stderr, "\nErro: falha na alocacao de memoria\n");
return 1;
}
initialize(novaLinha, TAMANHO_CODON * 7);
strcat(novaLinha, "\n");
strcat(novaLinha, codonsDNA[i]);
strcat(novaLinha, " ");
strcat(novaLinha, codonsRNA[i]);
strcat(novaLinha, " ");
strcat(novaLinha, aminoacidos[i]);
strcat(resultadoArquivo, novaLinha);
free(novaLinha);
}
escrever(resultadoArquivo, "resultados-omp.txt");
// NOTE(review): only the arrays themselves are freed; the strings they
// point to (returned by split/transcription/aminoacids) are leaked —
// confirm their ownership before freeing them individually.
free(codonsDNA);
free(codonsRNA);
free(aminoacidos);
free(resultadoArquivo);
printf(COR_VERDE "\nProcesso: ESCRITA DE RESULTADOS NO ARQUIVO CONCLUIDA " COR_PADRAO);
// Report the total execution time
tFim = omp_get_wtime(); // End time
tExecucao = tFim - tIni;
printf(COR_VERDE "\nTempo total: %fs\n" COR_PADRAO, tExecucao);
return 0;
}
|
matvec_simd.c | //matvec.c
//Multiplies a matrix by a vector
// This is the linear, no AVX/OpenMP version
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <omp.h>
#define N_RUNS 1000
#define N 1200
// read timer in second
// Return the current wall-clock time in seconds (millisecond resolution).
double read_timer() {
    struct timeb now;
    ftime(&now);
    double seconds = (double) now.time;
    double fraction = (double) now.millitm / 1000.0;
    return seconds + fraction;
}
//Create a matrix and a vector and fill with random numbers
void init(float *matrix, float *vector) {
for (int i = 0; i<N; i++) {
for (int j = 0; j<N; j++) {
matrix[i*N+j] = (float)rand()/(float)(RAND_MAX/10.0);
}
vector[i] = (float)rand()/(float)(RAND_MAX/10.0);
}
}
void sum(float *matrix, float *vector);
/*
 * Benchmark driver: allocate an N x N matrix and an N-vector, fill them
 * with random data, run the mat-vec kernel N_RUNS times, and report the
 * runtime and GFLOPS.  Returns 0 on success, 1 on allocation failure.
 */
int main(int argc, char **argv) {
    //Get default number of threads and size
    int num_threads = 4; /* 4 is default number of threads */
    omp_set_num_threads(num_threads);
    //Set everything up
    float *matrix = malloc(sizeof(float)*N*N);
    float *vector = malloc(sizeof(float)*N);
    // BUG FIX: the original code never checked these allocations before use
    if (matrix == NULL || vector == NULL) {
        fprintf(stderr, "Allocation failure\n");
        free(matrix);
        free(vector);
        return 1;
    }
    srand(time(NULL));
    init(matrix, vector);
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        sum(matrix, vector);
    double t = (read_timer() - start);
    // 2*N flops per row (multiply + add), N rows, N_RUNS repetitions
    double gflops = ((2.0 * N) * N * N_RUNS) / (1.0e9 * t);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Matrix-vector (SIMD):\t\t%4f\t%4f\n", t, gflops);
    // BUG FIX: release the buffers (the original leaked both)
    free(matrix);
    free(vector);
    return 0;
}
//Our sum function- what it does is pretty straight-forward.
// Computes one matrix-vector product row by row, writing each row's dot
// product back into vector[i] as soon as it is computed.
// NOTE(review): because vector[i] is overwritten in place, row i reads the
// already-updated values vector[j] for j < i — this differs from a
// conventional y = A*x with a separate output vector. Confirm this is the
// intended benchmark kernel before reusing it as a general mat-vec.
void sum(float *matrix, float *vector) {
float s = 0;
for (int i = 0; i<N; i++) {
s = 0;
// Vectorize the dot product of row i with the (partially updated) vector
#pragma omp simd
for (int j = 0; j<N; j++) {
s += (matrix[i*N+j] * vector[j]);
}
vector[i] = s;
}
}
|
GB_unop__isinf_bool_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isinf_bool_fp32)
// op(A') function: GB (_unop_tran__isinf_bool_fp32)
// C type: bool
// A type: float
// cast: float cij = (aij)
// unaryop: cij = isinf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isinf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (aij) ; \
Cx [pC] = isinf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = isinf (aij) to every entry of A, typecasting float -> bool,
// using up to nthreads OpenMP threads.  Handles both the dense case
// (Ab == NULL) and the bitmap case (Ab marks which entries are present).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
// NOTE: this file is auto-generated; the code itself must not be edited.
GrB_Info GB (_unop_apply__isinf_bool_fp32)
(
bool *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isinf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isinf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast float -> bool, and apply
// cij = isinf (aij).  The actual loop body lives in the shared template
// GB_unop_transpose.c, specialized here by the GB_* macros defined above.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
// NOTE: this file is auto-generated; the code itself must not be edited.
GrB_Info GB (_unop_tran__isinf_bool_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
decoder.c | /*! @file
* @brief
*
* @version 1.0.0
*
* (C) Copyright 2017 GoPro Inc (http://gopro.com/).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "config.h"
#include "timing.h"
#if WARPSTUFF
#include "WarpLib.h"
#endif
//#include <stdlib.h>
#include <stddef.h>
#include <math.h>
#include <memory.h>
#include <time.h>
//#include <stdint.h>
#ifndef DEBUG
#define DEBUG (1 && _DEBUG)
#endif
#ifndef TIMING
#define TIMING (1 && _TIMING)
#endif
#ifndef XMMOPT
#define XMMOPT (1 && _XMMOPT)
#endif
#define GEN_LICENSE 0
#ifndef PI
#define PI 3.14159265359f
#endif
#ifdef _WIN32
#include <windows.h>
#elif __APPLE__
#include "macdefs.h"
#else
#ifndef ZeroMemory
#define ZeroMemory(p,s) memset(p,0,s)
#endif
#endif
#if !defined(_WIN32)
#define min(x,y) (((x) < (y)) ? (x) : (y))
#define max(x,y) (((x) > (y)) ? (x) : (y))
#endif
#include <stdio.h>
#include <assert.h>
#include <emmintrin.h> // Intel aligned alloc and free
#include "dump.h"
#include "decoder.h"
#include "codec.h"
#include "vlc.h"
#include "codebooks.h" // References to the codebooks
#include "debug.h"
#include "color.h" // Color formats supported by image processing routines
#include "image.h"
#include "filter.h"
#include "spatial.h"
#include "temporal.h"
//#include "logo40x5.h"
#include "convert.h"
#include "wavelet.h"
#include "bitstream.h"
#include "frame.h"
#include "cpuid.h"
#include "bayer.h"
#include "metadata.h"
#include "DemoasicFrames.h" //TODO: Change filename to lower case
#include "swap.h"
#include "draw.h"
#include "RGB2YUV.h"
#include "lutpath.h"
#include "exception.h"
extern void FastVignetteInplaceWP13(DECODER *decoder, int displayWidth, int width, int height, int y, float r1, float r2, float gain,
int16_t *sptr, int resolution, int pixelsize);
extern void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize);
extern void FastSharpeningBlurVWP13(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
extern void FastSharpeningBlurVW13A(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
#ifdef SPI_LOADER
#include "spi.h"
#include "keyframes.h"
#endif
#ifndef DUMP
#define DUMP (0 && _DUMP)
#endif
#define ERROR_TOLERANT 1
#if defined(_WIN32) && DEBUG
#include <tchar.h> // For printing debug string in the console window
#endif
#define _DECODE_TRANSFORM 1 // Enable concurrent decoding and inverse transform
#define _TRANSFORM_FIELDPLUS 1 // Use the field plus transform
#if _SIF // In SIF resolution, enable the _DECODE_TRANSFORM switch
#if _DECODE_TRANSFORM == 0
#define _DECODE_TRANSFORM 1
#endif
#endif
#ifndef _FSMBUFFER
#define _FSMBUFFER 0
#endif
// Turn off saturation in this file
#ifdef SATURATE
#undef SATURATE
#endif
#define SATURATE(x) (assert(PIXEL_MIN <= (x) && (x) <= PIXEL_MAX), (x))
#define SATURATE8S(x) (assert(PIXEL8S_MIN <= (x) && (x) <= PIXEL8S_MAX), (x))
//#define SATURATE8S(x) SATURATE_8S(x)
//#define SATURATE(x) (x)
// Enable or disable function inlining
#if 1 //DEBUG
#define inline
#else
#define inline __forceinline
#endif
// Pixel size used for computing the compression ratio
#define BITS_PER_PIXEL 8
// Default processor capabilities
#define DEFAULT_FEATURES (_CPU_FEATURE_MMX )
#define DEMOSAIC_DELAYLINES 4
// Forward references
void AllocDecoderGroup(DECODER *decoder);
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format);
void EraseDecoderFrames(DECODER *decoder);
TRANSFORM *AllocGroupTransform(GROUP *group, int channel);
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format);
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile);
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch);
#endif
bool DecodeBandFSM16sNoGapHighByte(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
uint8_t *output_buffer, int32_t output_pitch,
FRAME_INFO *info, int chroma_offset,
int precision);
extern void Row16uQuarter2OutputFormat(DECODER *decoder, FRAME_INFO *info, int thread_index,
uint8_t *output, int pitch, int frame, void *scratch, size_t scratch_size, int threading,
uint8_t *channeldata[TRANSFORM_MAX_CHANNELS], // used in quarter res decodes
int channelpitch[TRANSFORM_MAX_CHANNELS]); // used in quarter res decodes);
//extern void ComputeCube(DECODER *decoder);
extern bool NeedCube(DECODER *decoder);
extern void LoadTweak();
//extern int g_topdown;
//extern int g_bottomup;
// Performance measurements
#if _TIMING
extern TIMER tk_decompress; // Timers
extern TIMER tk_decoding;
extern TIMER tk_convert;
extern TIMER tk_inverse;
extern COUNTER decode_byte_count; // Counters
extern COUNTER sample_byte_count;
extern COUNTER alloc_group_count;
extern COUNTER alloc_transform_count;
extern COUNTER alloc_buffer_count;
extern COUNTER spatial_decoding_count;
extern COUNTER temporal_decoding_count;
extern COUNTER progressive_decode_count;
#endif
#if 0
// Table that maps from decoded format to pixel size
static const int PixelSize[] =
{
0, // DECODED_FORMAT_UNSUPPORTED
2, // DECODED_FORMAT_YUYV
2, // DECODED_FORMAT_UYVY
2, // DECODED_FORMAT_420
4, // DECODED_FORMAT_RGB32
3, // DECODED_FORMAT_RGB24
2, // DECODED_FORMAT_RGB555
2, // DECODED_FORMAT_RGB565
#if 0
2, // DECODED_FORMAT_YUYV_INVERTED
2, // DECODED_FORMAT_UYVY_INVERTED
2, // DECODED_FORMAT_420_INVERTED
#endif
4, // DECODED_FORMAT_RGB32_INVERTED
3, // DECODED_FORMAT_RGB24_INVERTED
2, // DECODED_FORMAT_RGB555_INVERTED
2, // DECODED_FORMAT_RGB565_INVERTED
3, // DECODED_FORMAT_V210,
4, // DECODED_FORMAT_YU64, // Custom 16 bits per channel (all data scaled up) YUYV format.
4, // DECODED_FORMAT_YR16 // Rows of YUV with 16 bits per channel
};
#if _DEBUG
char *decoded_format_string[] =
{
"Unsupported",
"YUYV",
"UYUV",
"420",
"RGB32",
"RGB24",
"RGB555",
"RGB565",
#if 0
"YUYV Inverted",
"UYVY Inverted",
"420 Inverted",
#endif
//#if BUILD_PROSPECT
"RGB32 Inverted",
"RGB24 Inverted",
"RGB555 Inverted",
"RGB565 Inverted",
"V210"
//#endif
};
#endif
#else
static const int pixel_size_table[] =
{
0, // COLOR_FORMAT_UNKNOWN
2, // COLOR_FORMAT_UYVY
2, // COLOR_FORMAT_YUYV
2, // COLOR_FORMAT_YVYU
0, // COLOR_FORMAT_YV12
0, // COLOR_FORMAT_I420
2, // COLOR_FORMAT_RGB16
3, // COLOR_FORMAT_RGB24
4, // COLOR_FORMAT_RGB32
0,
3, // COLOR_FORMAT_V210
0, // COLOR_FORMAT_RGB10
4, // COLOR_FORMAT_YU64
4, // COLOR_FORMAT_YR16
4, // COLOR_FORMAT_YUVA
};
static const int pixel_size_table_length = sizeof(pixel_size_table)/sizeof(pixel_size_table[0]);
// Return the size in bytes of one pixel (or one plane sample) for the given
// color format code.  Small codes are resolved through pixel_size_table;
// Avid and Bayer formats occupy their own code ranges and are handled by
// range checks below.  Returns 0 for unknown small codes.
static int PixelSize(int format)
{
int pixel_size = 0;
// Mask off the other fields in the format descriptor
// Use the lookup table to determine the pixel size (if possible)
if (0 <= format && format < pixel_size_table_length)
{
pixel_size = pixel_size_table[format];
//return pixel_size;
}
//TODO: Change the rest of this routine into one big switch statement
// Is this an Avid format?
else if (COLOR_FORMAT_AVID <= format && format <= COLOR_FORMAT_AVID_END)
{
switch (format)
{
case COLOR_FORMAT_CbYCrY_8bit:
case COLOR_FORMAT_CbYCrY_10bit_2_8: // Only valid for the lower plane
pixel_size = 1;
break;
case COLOR_FORMAT_CbYCrY_16bit:
case COLOR_FORMAT_CbYCrY_16bit_2_14:
case COLOR_FORMAT_CbYCrY_16bit_10_6:
pixel_size = 2;
break;
default:
assert(0);
pixel_size = 2; // Assume 16 bits per pixel if the format is unknown
break;
}
}
// Is this a Bayer format?
else if (COLOR_FORMAT_BAYER <= format && format <= COLOR_FORMAT_BAYER_END)
{
// NOTE(review): assumes Bayer codes start at 100 so that (format - 100)
// encodes the per-component byte size, clamped to 2 — confirm against
// the COLOR_FORMAT_BAYER definitions in color.h
pixel_size = (format - 100);
if(pixel_size > 2)
pixel_size = 2;
}
else if (format == COLOR_FORMAT_RG48)
pixel_size = 6;
else if (format == COLOR_FORMAT_RG64)
pixel_size = 8;
else if (format == COLOR_FORMAT_B64A) {
pixel_size = 8;
}
return pixel_size;
}
#endif
// Return the size in bytes of one pixel for the given decoded format.
// Asserts and returns 0 for formats whose pixel size is not a whole
// number of bytes (packed 10-bit formats) and for unknown formats.
int DecodedPixelSize(DECODED_FORMAT format)
{
	switch (format)
	{
	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_CT_UCHAR:
		return 2;

	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_CT_SHORT:
	case DECODED_FORMAT_CT_SHORT_2_14:
	case DECODED_FORMAT_CT_USHORT_10_6:
	case DECODED_FORMAT_ROW16U:
		return 4;

	case DECODED_FORMAT_RG48:
		return 6;

	case DECODED_FORMAT_CT_10Bit_2_8:
	case DECODED_FORMAT_V210:
		// This routine should not be called to compute the pixel sizes
		// for these packed formats
		assert(0);
		return 0;

	default:
		// Unknown decoded format
		assert(0);
		return 0;
	}
}
#if 0
// Convert FOURCC code to a string
static void str4cc(char *string, uint32_t marker)
{
char *p = (char *)&marker + 3;
char *s = string;
int i;
for (i = 0; i < 4; i++)
*(s++) = *(p--);
*s = '\0';
}
#endif
// Compute the display aspect ratio (*w : *h) for the current frame.
// First scales the decoded frame dimensions back up to full resolution
// according to the decode mode, then either derives the ratio from the
// stored pixel aspect, or falls back to heuristics (16:9 for common HD
// sizes, square pixels otherwise) when the stored picture aspect cannot
// be trusted.
void GetDisplayAspectRatio(DECODER *decoder, int *w, int *h)
{
int origw,origh, guess = 0;
origw = decoder->frame.width;
origh = decoder->frame.height;
// Undo the decode-resolution scaling to recover the full-frame size
switch(decoder->frame.resolution)
{
case DECODED_RESOLUTION_FULL:
break;
case DECODED_RESOLUTION_HALF:
origw *= 2;
origh *= 2;
break;
case DECODED_RESOLUTION_QUARTER:
origw *= 4;
origh *= 4;
break;
case DECODED_RESOLUTION_LOWPASS_ONLY:
origw *= 8;
origh *= 8;
break;
case DECODED_RESOLUTION_FULL_DEBAYER:
break;
case DECODED_RESOLUTION_HALF_NODEBAYER:
origw *= 2;
origh *= 2;
break;
case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
origw *= 4;
origh *= 4;
break;
case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
//origw *= 2; //DAN20110129 -- seems the width has been corrected elsewhere or was never halved.
break;
case DECODED_RESOLUTION_HALF_HORIZONTAL:
origw *= 2;
break;
case DECODED_RESOLUTION_HALF_VERTICAL:
origh *= 2;
break;
}
// A non-positive stored aspect means we must guess
if(decoder->codec.picture_aspect_x <= 0 || decoder->codec.picture_aspect_y <= 0)
guess = 1;
// if guess default values, we can't trust them
if(decoder->codec.picture_aspect_x == 16 && decoder->codec.picture_aspect_y == 9)
guess = 1;
if(decoder->pixel_aspect_x && decoder->pixel_aspect_y)
{
int j,den,num;
// Derive the picture aspect from the pixel aspect, then reduce
// num/den to lowest terms by trial division (a simple GCD sweep)
decoder->codec.picture_aspect_x = num = (origw * decoder->pixel_aspect_x) / decoder->pixel_aspect_y;
decoder->codec.picture_aspect_y = den = origh;
for(j=2; j<num+den; j++)
{
while(num == (num/j)*j && den == (den/j)*j)
{
num /= j;
den /= j;
}
}
decoder->codec.picture_aspect_x = num;
decoder->codec.picture_aspect_y = den;
guess = 0;
}
if(guess)
{
// Heuristics: common HD heights imply 16:9; otherwise assume the
// pixels are square and report the frame dimensions themselves
if(origw > 720) //HD.
{
if(origh == 1080)
{
if(origw == 2048)
*w=origw,*h=origh;
else
*w=16,*h=9; // assume 16x9
}
else if(origh == 720)
{
*w=16,*h=9; // assume 16x9
}
else
{
*w=origw,*h=origh; // assume square pixel.
}
}
else
{
if(origh == 720)
{
*w=16,*h=9; // assume 16x9
}
else
{
*w=origw,*h=origh; // assume square pixel.
}
}
}
else
{
// Trusted stored (or just computed) picture aspect
*w=decoder->codec.picture_aspect_x;
*h=decoder->codec.picture_aspect_y;
}
}
// Return true if the resolution code is one the decoder accepts as an
// output frame resolution; false for any other value.
bool IsValidFrameResolution(int resolution)
{
	return (resolution == DECODED_RESOLUTION_FULL ||
			resolution == DECODED_RESOLUTION_HALF ||
			resolution == DECODED_RESOLUTION_QUARTER ||
			resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
			resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
			resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER);
}
// Return true if this decoder can decode to quarter resolution
// (currently unconditional: quarter-resolution decoding is always
// available; the decoder argument is accepted for interface symmetry
// and is not examined)
bool IsQuarterResolutionEnabled(DECODER *decoder)
{
return true;
}
// Return the number of bytes a caller must allocate to hold one DECODER
// instance (used by clients that allocate the decoder themselves)
size_t DecoderSize()
{
return sizeof(DECODER);
}
// Initialize a DECODER in place: zero the whole structure (preserving any
// previously configured thread-control parameters), install the logfile
// and codebooks, and reset the codec state and scratch buffer.
// cs may be NULL, in which case the default codeset cs9 is used.
// NOTE(review): when cs == NULL only codeset slot 0 is populated; the
// remaining CODEC_NUM_CODESETS-1 slots stay zeroed from the memset —
// confirm callers never select a higher codeset in that configuration.
void InitDecoder(DECODER *decoder, FILE *logfile, CODESET *cs)
{
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "InitDecoder, decoder: 0x%p\n", decoder);
}
#endif
{
//TODO: Clear the decoder before setting the CPU limit and affinity
int i;
//int thread_limit=0, thread_affinity=0, set_thread_params=0, capabilities=0;
//save key params
// Copy the thread-control settings aside so the memset below does not
// destroy configuration made before InitDecoder was called
Thread_cntrl saved_params = decoder->thread_cntrl;
// Clear everything
memset(decoder, 0, sizeof(DECODER));
//restore key params
if(saved_params.set_thread_params == 1) // used by the DShow Interface
{
decoder->thread_cntrl = saved_params;
}
#if _TIMING
InitTiming();
#endif
// Set the file for status information during decoding
decoder->logfile = logfile;
// Initialize the decoding error to no error
decoder->error = CODEC_ERROR_OKAY;
// Most recent marker found during decoding
decoder->marker = 0;
// Count of frames decoded
decoder->frame_count = 0;
// Set the codebooks that will be used for decoding
if (cs != NULL)
{
// Use the codeset provided in the call
for(i=0; i<CODEC_NUM_CODESETS; i++)
{
// Codebook for decoding highpass coefficients
decoder->magsbook[i] = cs[i].magsbook;
// Codebook for decoding runs of coefficients
decoder->runsbook[i] = cs[i].runsbook;
// Lookup table for fast codebook search
decoder->fastbook[i] = cs[i].fastbook;
}
}
else
{
// Use the default codeset
decoder->magsbook[0] = cs9.magsbook;
decoder->runsbook[0] = cs9.runsbook;
decoder->fastbook[0] = cs9.fastbook;
}
// Initialize the codec state
InitCodecState(&decoder->codec);
InitScratchBuffer(&decoder->scratch, NULL, 0);
#if _DUMP
// Initialize the descriptor for controlling debug output
decoder->dump.enabled = false;
decoder->dump.channel_mask = 0;
decoder->dump.wavelet_mask = 0;
memset(decoder->dump.directory, 0, sizeof(decoder->dump.directory));
memset(decoder->dump.filename, 0, sizeof(decoder->dump.filename));
#endif
}
//REDTEST
decoder->frm = 0;
decoder->run = 1;
#if _ALLOCATOR
decoder->allocator = NULL;
#endif
// Mark the decoder as initialized so ClearDecoder knows there is state to free
decoder->initialized = 1; //DAN20060912
}
// Install a license key into the decoder, but only if no key has been set
// yet (i.e. the stored key is still all zero bytes). Does nothing when
// either argument is NULL or a key is already present.
void InitDecoderLicense(DECODER *decoder, const unsigned char *licensekey)
{
	if (decoder == NULL || licensekey == NULL)
		return;

	const unsigned char blank_key[16] = {0};

	// A nonzero stored key means a license was already installed; keep it
	if (memcmp(decoder->licensekey, blank_key, sizeof(decoder->licensekey)) != 0)
		return;

	// Copy the license into the decoder
	memcpy(decoder->licensekey, licensekey, sizeof(decoder->licensekey));
}
// Free data allocated within the decoder
void ClearDecoder(DECODER *decoder)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
#endif
// Free the transforms allocated in the decoder
int i;
if(decoder->initialized == 0)
return; // nothing to free //DAN20060912
#if _GRAPHICS
DrawClose(decoder);
#endif
for(i=0; i<=METADATA_PRIORITY_MAX; i++)
{
if(decoder->DataBases[i])
{
#if _ALLOCATOR
Free(decoder->allocator, decoder->DataBases[i]);
#else
MEMORY_FREE(decoder->DataBases[i]);
#endif
decoder->DataBases[i] = NULL;
decoder->DataBasesSize[i] = 0;
decoder->DataBasesAllocSize[i] = 0;
}
}
if(decoder->sqrttable)
{
#if _ALLOCATOR
Free(decoder->allocator, decoder->sqrttable);
#else
MEMORY_FREE(decoder->sqrttable);
#endif
decoder->sqrttable = NULL;
}
for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
{
#if _ALLOCATOR
FreeTransform(allocator, decoder->transform[i]);
#else
FreeTransform(decoder->transform[i]);
#endif
decoder->transform[i] = NULL;
}
if(decoder->aligned_sample_buffer)
{
#if _ALLOCATOR
FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
#else
MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
#endif
decoder->aligned_sample_buffer = NULL;
decoder->aligned_sample_buffer_size = 0;
}
if(decoder->tools)
{
#if _ALLOCATOR
Free(decoder->allocator, decoder->tools);
#else
MEMORY_FREE(decoder->tools);
#endif
decoder->tools = NULL;
}
// Free the buffer allocated for decoding
if (decoder->buffer != NULL)
{
#if DEBUG_BUFFER_USAGE
int i;
char *ptr = (char *)decoder->buffer;
FILE *fp = fopen("C:/free.txt", "a");
fprintf(fp, "decoder->buffer = %08x buffer_size = %d\n", decoder->buffer ,decoder->buffer_size);
i = decoder->buffer_size-1;
while(ptr[i] == 1) i--;
fprintf(fp, "used %2.3f percent\n", 100.0*(float)i/(float)decoder->buffer_size);
fclose(fp);
#endif
#if _ALLOCATOR
FreeAligned(allocator, decoder->buffer);
#else
MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
decoder->buffer = NULL;
decoder->buffer_size = 0;
// Clear the fields in the scratch buffer descriptor
memset(&decoder->scratch, 0, sizeof(SCRATCH));
// Eventually the buffer and buffer size fields will be obsolete
}
for(i=0;i<_MAX_CPUS;i++)
{
if(decoder->threads_buffer[i])
{
#if _ALLOCATOR
FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
decoder->threads_buffer[i] = NULL;
}
}
decoder->threads_buffer_size = 0;
// Do not attempt to free the codebooks since the
// codebook pointers are references to static tables
// Can free some of the data structures allocated by the decoder
FreeCodebooks(decoder);
#if _INTERLACED_WORKER_THREADS
if(decoder->interlaced_worker.lock_init) // threads started
{
int i;
// Signal this thread to stop
SetEvent(decoder->interlaced_worker.stop_event);
// Free all handles used by the worker threads
for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
{
WaitForSingleObject(decoder->interlaced_worker.handle[i], INFINITE); //JY20080307
CloseHandle(decoder->interlaced_worker.handle[i]);
CloseHandle(decoder->interlaced_worker.start_event[i]);
CloseHandle(decoder->interlaced_worker.done_event[i]);
}
CloseHandle(decoder->interlaced_worker.row_semaphore);
CloseHandle(decoder->interlaced_worker.stop_event);
for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
{
decoder->interlaced_worker.handle[i] = 0;
decoder->interlaced_worker.start_event[i] = 0;
decoder->interlaced_worker.done_event[i] = 0;
}
decoder->interlaced_worker.row_semaphore = 0;
decoder->interlaced_worker.stop_event = 0;
}
// Free the critical section used by the worker threads
DeleteCriticalSection(&decoder->interlaced_worker.lock);
decoder->interlaced_worker.lock_init = 0;
#endif
#if _THREADED
if(decoder->entropy_worker_new.pool.thread_count)
{
ThreadPoolDelete(&decoder->entropy_worker_new.pool);
DeleteLock(&decoder->entropy_worker_new.lock);
}
if(decoder->worker_thread.pool.thread_count)
{
ThreadPoolDelete(&decoder->worker_thread.pool);
DeleteLock(&decoder->worker_thread.lock);
}
if(decoder->draw_thread.pool.thread_count)
{
ThreadPoolDelete(&decoder->draw_thread.pool);
DeleteLock(&decoder->draw_thread.lock);
}
/*
if(decoder->qt_convert_worker.pool.thread_count)
{
ThreadPoolDelete(&decoder->qt_convert_worker.pool);
DeleteLock(&decoder->qt_convert_worker.lock);
}
if(decoder->qt_scale_worker.pool.thread_count)
{
ThreadPoolDelete(&decoder->qt_scale_worker.pool);
DeleteLock(&decoder->qt_scale_worker.lock);
}
*/
if(decoder->parallelDecoder)
{
if(decoder->parallelDecoder->decoder_thread.pool.thread_count)
{
ThreadPoolDelete(&decoder->parallelDecoder->decoder_thread.pool);
DeleteLock(&decoder->parallelDecoder->decoder_thread.lock);
decoder->parallelDecoder->decoder_thread.pool.thread_count = 0;
}
ClearDecoder(decoder->parallelDecoder);
#if _ALLOCATOR
Free(decoder->allocator, decoder->parallelDecoder);
#else
MEMORY_FREE(decoder->parallelDecoder);
#endif
decoder->parallelDecoder = NULL;
}
#endif
//MEMORY_ALIGNED_FREE(RawBayer16);
#if _ALLOCATOR
if(decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = 0;
decoder->RGBFilterBufferSize = 0;
}
if(decoder->RawBayer16)
{
FreeAligned(decoder->allocator, decoder->RawBayer16);
decoder->RawBayer16 = 0;
decoder->RawBayerSize = 0;
}
if(decoder->StereoBuffer)
{
FreeAligned(decoder->allocator, decoder->StereoBuffer);
decoder->StereoBuffer = 0;
decoder->StereoBufferSize = 0;
}
if(decoder->RawCube)
{
FreeAligned(decoder->allocator, decoder->RawCube);
decoder->RawCube = 0;
}
if(decoder->Curve2Linear)
{
FreeAligned(decoder->allocator, decoder->Curve2Linear);
decoder->Curve2Linear = 0;
}
if(decoder->Linear2CurveRed)
{
FreeAligned(decoder->allocator, decoder->Linear2CurveRed);
decoder->Linear2CurveRed = NULL;
}
if(decoder->Linear2CurveGrn)
{
FreeAligned(decoder->allocator, decoder->Linear2CurveGrn);
decoder->Linear2CurveGrn = NULL;
}
if(decoder->Linear2CurveBlu)
{
FreeAligned(decoder->allocator, decoder->Linear2CurveBlu);
decoder->Linear2CurveBlu = NULL;
}
if(decoder->BYR4LinearRestore)
{
FreeAligned(decoder->allocator, decoder->BYR4LinearRestore);
decoder->BYR4LinearRestore = NULL;
}
if(decoder->GammaContrastRed)
{
FreeAligned(decoder->allocator, decoder->GammaContrastRed);
decoder->GammaContrastRed = NULL;
}
if(decoder->GammaContrastGrn)
{
FreeAligned(decoder->allocator, decoder->GammaContrastGrn);
decoder->GammaContrastGrn = NULL;
}
if(decoder->GammaContrastBlu)
{
FreeAligned(decoder->allocator, decoder->GammaContrastBlu);
decoder->GammaContrastBlu = NULL;
}
//3d LUT
{
if(decoder->LUTcache)
Free(decoder->allocator, decoder->LUTcache);
decoder->LUTcache = NULL;
decoder->LUTcacheCRC = 0;
}
#if WARPSTUFF
{
if (decoder->lens_correct_buffer)
#if _ALLOCATOR
Free(decoder->allocator, decoder->lens_correct_buffer);
#else
MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
if (decoder->mesh)
geomesh_destroy(decoder->mesh);
decoder->lastLensOffsetX = 0;
decoder->lastLensOffsetY = 0;
decoder->lastLensOffsetZ = 0;
decoder->lastLensOffsetR = 0;
decoder->lastLensZoom = 0;
decoder->lastLensFishFOV = 0;
decoder->lastLensGoPro = 0;
decoder->lastLensSphere = 0;
decoder->lastLensFill = 0;
decoder->lastLensStyleSel = 0;
memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
decoder->mesh = NULL;
decoder->lens_correct_buffer = NULL;
}
#endif
if(decoder->overrideData)
{
Free(decoder->allocator, decoder->overrideData);
decoder->overrideData = NULL;
decoder->overrideSize = 0;
}
for(i=0; i<64; i++)
{
if(decoder->mdc[i])
Free(decoder->allocator, decoder->mdc[i]);
decoder->mdc[i] = NULL;
decoder->mdc_size[i] = 0;
}
#else
if(decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
}
if(decoder->RawBayer16)
{
MEMORY_ALIGNED_FREE(decoder->RawBayer16);
decoder->RawBayer16 = NULL;
}
if(decoder->StereoBuffer)
{
MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
decoder->StereoBuffer = NULL;
decoder->StereoBufferSize = 0;
}
if(decoder->RawCube)
{
MEMORY_ALIGNED_FREE(decoder->RawCube);
decoder->RawCube = NULL;
}
if(decoder->Curve2Linear)
{
MEMORY_ALIGNED_FREE(decoder->Curve2Linear);
decoder->Curve2Linear = NULL;
}
if(decoder->BYR4LinearRestore)
{
MEMORY_ALIGNED_FREE(decoder->BYR4LinearRestore);
decoder->BYR4LinearRestore = NULL;
}
if(decoder->Linear2CurveRed)
{
MEMORY_ALIGNED_FREE(decoder->Linear2CurveRed);
decoder->Linear2CurveRed = NULL;
}
if(decoder->Linear2CurveGrn)
{
MEMORY_ALIGNED_FREE(decoder->Linear2CurveGrn);
decoder->Linear2CurveGrn = NULL;
}
if(decoder->Linear2CurveBlu)
{
MEMORY_ALIGNED_FREE(decoder->Linear2CurveBlu);
decoder->Linear2CurveBlu = NULL;
}
if(decoder->GammaContrastRed)
{
MEMORY_ALIGNED_FREE(decoder->GammaContrastRed);
decoder->GammaContrastRed = NULL;
}
if(decoder->GammaContrastGrn)
{
MEMORY_ALIGNED_FREE(decoder->GammaContrastGrn);
decoder->GammaContrastGrn = NULL;
}
if(decoder->GammaContrastBlu)
{
MEMORY_ALIGNED_FREE(decoder->GammaContrastBlu);
decoder->GammaContrastBlu = NULL;
}
//3d LUT
{
if(decoder->LUTcache)
MEMORY_FREE(decoder->LUTcache);
decoder->LUTcache = NULL;
decoder->LUTcacheCRC = 0;
}
#if WARPSTUFF
{
if (decoder->lens_correct_buffer)
#if _ALLOCATOR
Free(decoder->allocator, decoder->lens_correct_buffer);
#else
MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
if (decoder->mesh)
geomesh_destroy(mesh);
decoder->mesh = NULL;
decoder->lens_correct_buffer = NULL;
decoder->lastLensOffsetX = 0;
decoder->lastLensOffsetY = 0;
decoder->lastLensOffsetZ = 0;
decoder->lastLensOffsetR = 0;
decoder->lastLensZoom = 0;
decoder->lastLensFishFOV = 0;
decoder->lastLlensGoPro = 0;
decoder->lastLlensSphere = 0;
decoder->lastLlensFill = 0;
decoder->lastLlensStyleSel = 0;
memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
}
#endif
if(decoder->overrideData)
{
MEMORY_FREE(decoder->overrideData);
decoder->overrideData = NULL;
decoder->overrideSize = 0;
}
for(i=0; i<64; i++)
{
if(decoder->mdc[i])
MEMORY_FREE(decoder->mdc[i]);
decoder->mdc[i] = NULL;
decoder->mdc_size[i] = 0;
}
#endif
#ifdef SPI_LOADER
SPIReleaseAll(decoder);
//KeyframesReleaseAll(decoder);
#endif
decoder->initialized = 0;// cleared
}
// Release all resources owned by the decoder.
// The logfile is intentionally NOT closed here: the caller owns it and may
// keep logging (or close it) after the decoder has been torn down.
void ExitDecoder(DECODER *decoder)
{
    // Let the caller keep the logfile open or choose to close it
    //if (logfile) fclose(logfile);

    // Free data allocated within the decoder
    ClearDecoder(decoder);
}
// Allocate the data structures for decoding a group.
//
// Ensures every channel slot in decoder->transform[] has a zero-initialized
// TRANSFORM.  Slots that are already allocated are left untouched, so the
// routine is safe to call repeatedly.  On allocation failure it sets
// decoder->error to CODEC_ERROR_TRANSFORM_MEMORY and returns early.
void AllocDecoderGroup(DECODER *decoder)
{
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    //CODEC_STATE *codec = &decoder->codec;
    //int num_channels = codec->num_channels;//DAN07022004
    int channel;

    assert(decoder->codec.num_channels <= TRANSFORM_MAX_CHANNELS); //DAN07022004

    // Note: all channel slots are allocated, not just num_channels of them
    for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)//DAN07022004
    {
        TRANSFORM *transform = decoder->transform[channel];

        // Need to allocate a transform data structure?
        if (transform == NULL) {
#if _ALLOCATOR
            transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
            transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
            assert(transform != NULL);
            if (transform == NULL) {
                decoder->error = CODEC_ERROR_TRANSFORM_MEMORY;
                return;
            }
            memset(transform, 0, sizeof(TRANSFORM));
            decoder->transform[channel] = transform;
#if _TIMING
            alloc_transform_count++;
#endif
        }
    }
}
// Allocate the buffer used for intermediate results during decoding.
//
// Computes a scratch-buffer size from the frame width and the requested
// output format, (re)allocates decoder->buffer if the existing one is too
// small, then (re)allocates one per-CPU buffer for the debayer/color
// formatting threads.  Returns false if any allocation fails.
//
// NOTE(review): if the main buffer already exists and is large enough, the
// routine returns early WITHOUT revisiting the per-thread buffers — confirm
// callers cannot reach that path with stale/unallocated thread buffers.
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    int cpus;
    size_t size;
    size_t row_size;
    char *buffer;

#if 0
    // Allocate a buffer large enough for six rows of cache lines
    size = width * sizeof(PIXEL);
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 2 * TRANSFORM_MAX_CHANNELS * size;
#else
    // Allocate a buffer large enough for nine rows of cache lines
    size = width * sizeof(PIXEL) * 4;
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 3 * TRANSFORM_MAX_CHANNELS * size;
#endif

    // Formats with wider output rows need extra space beyond the base size
    switch (format)
    {
    case DECODED_FORMAT_V210:
    case DECODED_FORMAT_YU64:
        // Increase the buffer size for decoding to the V210 format
        row_size = 4 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;

    case DECODED_FORMAT_YR16:
    case DECODED_FORMAT_CbYCrY_10bit_2_8:
    case DECODED_FORMAT_CbYCrY_16bit_2_14:
    case DECODED_FORMAT_CbYCrY_16bit_10_6:
        // Increase the buffer size for decoding to the YUV16 format
        row_size = 4 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 8 * 2 * row_size;
        break;

    case DECODED_FORMAT_RG48:
    case DECODED_FORMAT_WP13:
        // Increase the buffer size for decoding to the RG48/WP13 formats
        row_size = 6 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 12 * 2 * row_size;
        break;

    case DECODED_FORMAT_RG64:
        // Increase the buffer size for decoding to the RG64 format
        row_size = 8 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 16 * 2 * row_size;
        break;

    case DECODED_FORMAT_BYR3:
        // Increase the buffer size for decoding to the BYR3 format
        row_size = 2 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;

    case DECODED_FORMAT_BYR4:
        // Increase the buffer size for decoding to the BYR4 format
        row_size = 2 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;

    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_W13A:
        // Increase the buffer size for decoding to the B64A format
        row_size = 8 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 16 * 2 * row_size;
        break;

    default:
        // Increase the buffer size for YUV to RGB conversion
        row_size = 3 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 2 * 2 * row_size;
        break;
    }

    // The upper 16 bits of the capabilities word hold the CPU count
    cpus = decoder->thread_cntrl.capabilities >> 16;
    if(cpus > 4)
        size *= 4;
    if(cpus > 16) //DAN20120803 -- 4444 clips
        size *= 2;

    // Has a buffer already been allocated?
    if (decoder->buffer != NULL)
    {
        // Is the buffer large enough?
        if (decoder->buffer_size < size)
        {
            // Free the previous buffer
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
            decoder->buffer = NULL;
            decoder->buffer_size = 0;
        }
        else
        {
            // Existing buffer is large enough -- keep it
            return true;
        }
    }

    buffer = decoder->buffer;
    if(buffer == NULL)
    {
        // Allocate the decoding buffer, cache-line aligned
#if _ALLOCATOR
        buffer = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
        buffer = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
        if(buffer == NULL)
        {
            return false;
        }
    }

#if DEBUG_BUFFER_USAGE
    // Fill with a known pattern so unused regions can be identified
    memset(buffer, 1, size);
#endif

    // Save the buffer and its size in the decoder
    decoder->buffer = buffer;
    decoder->buffer_size = size;

    // Initialize the scratch space descriptor
    InitScratchBuffer(&decoder->scratch, buffer, size);

    // Allocate a buffer for each debayer/color formatting thread
    {
        int i;

        // Per-thread buffer size (size is reused for this new purpose)
        size = (width+16)*3*2*4*2*4;// sixteen lines
        if(height*4 > width*3) //square or tall images where running out of scratch space for zooms.
            size *= 1 + ((height+(width/2))/width);

        // If the required size grew, discard all existing thread buffers
        if (decoder->threads_buffer_size < size)
        {
            for(i=0;i<_MAX_CPUS;i++)
            {
                if(decoder->threads_buffer[i])
                {
#if _ALLOCATOR
                    FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
                    MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
                    decoder->threads_buffer[i] = NULL;
                }
            }
            decoder->threads_buffer_size = 0;
        }

        // Allocate one buffer per CPU actually in use
        for(i=0;i<cpus;i++)
        {
            if(decoder->threads_buffer[i] == NULL)
            {
#if _ALLOCATOR
                decoder->threads_buffer[i] = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
                decoder->threads_buffer[i] = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
                if(decoder->threads_buffer[i] == NULL)
                {
                    return false;
                }
            }
        }
        decoder->threads_buffer_size = size;
    }

    // Eventually the scratch space descriptor will replace the buffer and buffer_size fields
    return true;
}
// Resize the decoding buffer for new frame dimensions or a new format.
// Thin wrapper over AllocDecoderBuffer, which already handles the case
// where the existing buffer is large enough.
bool ResizeDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    // Check that the dimensions are valid
    assert(width > 0);
    assert(height > 0);

    // Just call the allocation routine
    return AllocDecoderBuffer(decoder, width, height, format);
}
// Reset the per-band bookkeeping flags on every allocated wavelet so that
// the next decode starts from a clean slate.
void ClearTransformFlags(DECODER *decoder)
{
    int ch;

    for (ch = 0; ch < TRANSFORM_MAX_CHANNELS; ch++)
    {
        TRANSFORM *chan_transform = decoder->transform[ch];
        int k;

        // The transform array is packed: the first NULL marks the end
        if (chan_transform == NULL) break;

        for (k = 0; k < TRANSFORM_MAX_WAVELETS; k++)
        {
            IMAGE *wavelet = chan_transform->wavelet[k];
            if (wavelet == NULL) continue;

            wavelet->band_valid_flags = 0;
            wavelet->band_started_flags = 0;
        }
    }
}
// Initialize the tables that map subbands onto wavelets and bands.
//
// Copies the caller's subband-to-wavelet and subband-to-band mapping tables
// into the decoder, zeroing any table entries beyond num_subbands.
void InitWaveletDecoding(DECODER *decoder, int subband_wavelet_index[], int subband_band_index[], int num_subbands)
{
    // Number of bytes of mapping data actually supplied by the caller
    const size_t table_bytes = num_subbands * sizeof(int);

    // Zero each table before copying so that trailing entries are defined
    memset(decoder->subband_wavelet_index, 0, sizeof(decoder->subband_wavelet_index));
    memcpy(decoder->subband_wavelet_index, subband_wavelet_index, table_bytes);

    memset(decoder->subband_band_index, 0, sizeof(decoder->subband_band_index));
    memcpy(decoder->subband_band_index, subband_band_index, table_bytes);
}
#if 0
// Disabled: format validation helper (referenced only from a commented-out
// assert in DecodeInit).  Kept for reference.
static bool IsValidFormat(int format)
{
    bool valid_format = true;

    //TODO: Change this routine into a switch statement
    if(format == COLOR_FORMAT_BYR5)
        return true; // can decode to BYR5
    if(format == COLOR_FORMAT_BYR4)
        return true; // can decode to BYR4
    if(format == COLOR_FORMAT_BYR3)
        return true; // can decode to BYR3
    if(format == COLOR_FORMAT_BYR2)
        return true; // can decode to BYR2
    if(format == COLOR_FORMAT_RG48)
        return true; // can decode to RGB48
    if(format == COLOR_FORMAT_RG64)
        return true; // can decode to RGBA64
    if (format == COLOR_FORMAT_B64A)
    {
        return true; // Can decode to B64A
    }

    // Otherwise the format is valid only if it lies in the known range
    if (!(COLOR_FORMAT_UNKNOWN < format && format <= MAX_DECODED_COLOR_FORMAT)) {
        valid_format = false;
    }

    return valid_format;
}
#endif
#if _INTERLACED_WORKER_THREADS
// Create the synchronization objects and worker threads used for decoding
// the last (interlaced) wavelet level.  Idempotent: creation happens only
// once, guarded by the lock_init flag.
void StartInterlaceWorkerThreads(DECODER *decoder)
{
    int i;

    if(decoder->interlaced_worker.lock_init == 0)
    {
        // Create events for starting the worker threads
        // (auto-reset, initially unsignaled)
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.start_event[i] = CreateEvent(NULL, false, false, NULL);
        }

        // Create a semaphore to signal the worker threads to process rows
        decoder->interlaced_worker.row_semaphore = CreateSemaphore(NULL, 0, LONG_MAX, NULL);

        // Create an event for each worker thread to signal that it has finished
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.done_event[i] = CreateEvent(NULL, false, false, NULL);
        }

        // Create an event for forcing the worker threads to terminate
        // (manual-reset so one signal releases every waiting thread)
        decoder->interlaced_worker.stop_event = CreateEvent(NULL, true, false, NULL);

        // Zero the count of worker threads that are active
        decoder->interlaced_worker.thread_count = 0;

        // Initialize the lock for controlling access to the worker thread data
        InitializeCriticalSection(&decoder->interlaced_worker.lock);
        decoder->interlaced_worker.lock_init = 1;

        // Finally launch the worker threads themselves
        // NOTE(review): CreateThread failure is only asserted, not handled
        // in release builds -- confirm this is acceptable.
        for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.id[i] = 0;
            decoder->interlaced_worker.handle[i] = CreateThread(NULL, 0, InterlacedWorkerThreadProc, decoder, 0, &decoder->interlaced_worker.id[i]);
            assert(decoder->interlaced_worker.handle[i] != NULL);
        }
    }
}
#endif
#if 0
// Disabled: deliberately triggers a divide fault (when called with x == 0)
// to exercise the installed exception handler.  volatile prevents the
// compiler from folding the division away.
int TestException(int x)
{
    static volatile int y1 = 100;
    volatile int x1 = x;
    return y1 / x1;
}
#endif
// Process device driver request to initialize the decoder.
//
// Copies the compiled-in codesets, clears and configures the decoder for
// the given frame dimensions/format/resolution, initializes codebooks and
// finite state machines, optionally starts worker-thread pools, and
// allocates the decoding scratch buffers.  Returns false on failure
// (codebook initialization or buffer allocation).
#if _ALLOCATOR
bool DecodeInit(ALLOCATOR *allocator, DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#else
bool DecodeInit(DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#endif
{
    CODESET codesets[CODEC_NUM_CODESETS];
    int i;
    int cpus;
    //int x = 0;

    // Copy each compiled-in codeset into a local, mutable array
#if CODEC_NUM_CODESETS == 3
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
    memcpy(&codesets[2], &THIRD_CODESET, sizeof(CODESET));
#elif CODEC_NUM_CODESETS == 2
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
#else
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
#endif

#ifdef _WIN32
    // Set the handler for system exceptions
    SetDefaultExceptionHandler();
#endif
    //TestException(x);

    // Clear all decoder fields except the logfile and set the codebooks for decoding
    InitDecoder(decoder, logfile, &codesets[0]);

#if _ALLOCATOR
    decoder->allocator = allocator;
#endif

    if(decoder->thread_cntrl.capabilities == 0)
    {
        // Determine the processor capabilities
        SetDecoderCapabilities(decoder);
    }

    // The upper 16 bits of the capabilities word hold the CPU count
    cpus = decoder->thread_cntrl.capabilities >> 16;
    assert(cpus > 0 && cpus <= _MAX_CPUS);

    // Decode to half resolution?
    if (resolution == DECODED_RESOLUTION_HALF)
    {
        // Reduce the frame size by half in each dimension
        width = width/2;
        height = height/2;
    }
    else if (resolution == DECODED_RESOLUTION_QUARTER)
    {
        // Reduce the frame size by one fourth in each dimension
        width = width/4;
        height = height/4;
    }

    // Initialize the codebooks
#if _ALLOCATOR
    if (!InitCodebooks(decoder->allocator, codesets)) {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#else
    if (!InitCodebooks(codesets)) {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#endif

    // Initialize the FSM
    InitDecoderFSM(decoder, &codesets[0]);

    // Check the frame dimensions and format
    //assert(width > 0);
    //assert(height > 0);
//	assert(IsValidFormat(format));

#if _THREADED_DECODER
    // Create a semaphore to signal the transform thread to begin processing
    // Initialize the transform queue
    decoder->transform_queue.started = 0;
    decoder->transform_queue.num_entries = 0;
    decoder->transform_queue.next_entry = 0;
    decoder->transform_queue.free_entry = 0;
    memset(decoder->transform_queue.queue, 0, sizeof(decoder->transform_queue.queue));
#endif

#if _INTERLACED_WORKER_THREADS && _DELAY_THREAD_START==0
    StartInterlaceWorkerThreads(decoder);
#endif

#if _THREADED
#if !_DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
    if(cpus > 1)
    {
        // The entropy pool is capped at four threads
        int threads = cpus;
        if(threads > 4)
            threads = 4;
        CreateLock(&decoder->entropy_worker_new.lock);
        // Initialize the pool of entropy worker threads
        ThreadPoolCreate(&decoder->entropy_worker_new.pool,
            threads,
            EntropyWorkerThreadProc,
            decoder);
    }
    // Initialize the lock that controls access to the generic worker thread data
    CreateLock(&decoder->worker_thread.lock);
    // Initialize the pool of generic worker threads (one per CPU)
    ThreadPoolCreate(&decoder->worker_thread.pool,
        cpus,
        WorkerThreadProc,
        decoder);
#endif
#endif

    // Set the frame dimensions and format
    SetDecoderFormat(decoder, width, height, format, resolution);

    // Allocate the data structure for decoding the samples
    AllocDecoderGroup(decoder);

    // Note that this code assumes that the samples to decode are groups
    // as opposed to isolated frames which are not supported in this code

    // Allocate a buffer for storing intermediate results during decoding
    if (!AllocDecoderBuffer(decoder, width, height, format)) {
        return false;
    }

    // Should check that the finite state machine tables were initialized
    // NOTE(review): asserting flags < 0 presumably means "initialized" is
    // encoded as a negative value -- confirm against the FSM table layout.
    assert(decoder->fsm[0].table.flags < 0);

    // Initialize the finite state machine for this decoder
    for(i=0; i<CODEC_NUM_CODESETS; i++)
    {
        InitFSM(&decoder->fsm[i], codesets[i].fsm_table);
#if _COMPANDING
        // Scale the values in the finite state machine entries for companding
        ScaleFSM(&decoder->fsm[i].table);
#endif
    }

    // Indicate that the decoder has been initialized
    decoder->state = DECODER_STATE_INITIALIZED;

#if (1 && DUMP)
    // Write the wavelet bands as images
    SetDumpDirectory(CODEC_TYPE(decoder), DUMP_DECODER_DIRECTORY);
    SetDumpFilename(CODEC_TYPE(decoder), DUMP_DEFAULT_FILENAME);
    SetDumpChannelMask(CODEC_TYPE(decoder), 1/*ULONG_MAX*/);
//	SetDumpWaveletMask(CODEC_TYPE(decoder), 7<<4 | 1/*ULONG_MAX*/);
    SetDumpWaveletMask(CODEC_TYPE(decoder), ULONG_MAX);
    // Set this flag to enable output
    decoder->dump.enabled = true;
#endif

#if _TIMING
    // Initialize the global timers and counters
    InitTiming();
#endif

    //DAN20160203 Fix for a memory leak in InitCodebooks:
    // the local codeset copies own these tables, so free them before return
    for (i = 0; i < CODEC_NUM_CODESETS; i++)
    {
#if _ALLOCATOR
        Free(allocator, codesets[i].codebook_runbook); codesets[i].codebook_runbook = NULL;
        Free(allocator, codesets[i].fastbook); codesets[i].fastbook = NULL;
        Free(allocator, codesets[i].valuebook); codesets[i].valuebook = NULL;
#else
        MEMORY_FREE(codesets[i].codebook_runbook); codesets[i].codebook_runbook = NULL;
        MEMORY_FREE(codesets[i].fastbook); codesets[i].fastbook = NULL;
        MEMORY_FREE(codesets[i].valuebook); codesets[i].valuebook = NULL;
#endif
    }

    // The decoder has been initialized successfully
    return true;
}
// Late initialization of the entropy worker thread pool.
//
// Determines the CPU count (clamped by cfhddata.cpu_limit when set) and,
// when threads were not started at DecodeInit time (_DELAY_THREAD_START),
// creates the entropy worker pool on first use.
void DecodeEntropyInit(DECODER *decoder)
{
    int cpus = 1;

    if(decoder->thread_cntrl.capabilities == 0)
    {
        // Determine the processor capabilities
        SetDecoderCapabilities(decoder);
    }

    // The upper 16 bits of the capabilities word hold the CPU count
    cpus = decoder->thread_cntrl.capabilities >> 16;

    // Apply the user-configured CPU limit, if any, and write the clamped
    // count back into the capabilities word
    if (cpus > (int)decoder->cfhddata.cpu_limit && decoder->cfhddata.cpu_limit)
    {
        cpus = decoder->cfhddata.cpu_limit;
        decoder->thread_cntrl.limit = cpus;
        decoder->thread_cntrl.set_thread_params = 1;
        decoder->thread_cntrl.capabilities &= 0xffff;
        decoder->thread_cntrl.capabilities |= cpus<<16;
    }
    assert(cpus > 0 && cpus <= _MAX_CPUS);

#if _THREADED
#if _DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
    // Create the pool only once (thread_count == 0 means not yet created)
    if(cpus > 1 && decoder->entropy_worker_new.pool.thread_count == 0)
    {
        // The entropy pool is capped at four threads
        int threads = cpus;
        if(threads > 4)
            threads = 4;
        CreateLock(&decoder->entropy_worker_new.lock);
        // Initialize the pool of entropy worker threads
        ThreadPoolCreate(&decoder->entropy_worker_new.pool,
            threads,
            EntropyWorkerThreadProc,
            decoder);
    }
#endif
#endif
}
// Install (or clear) a metadata override blob on the decoder.
//
// Any existing override data is freed first.  When overrideSize is nonzero
// the blob is copied into decoder-owned memory; when it is zero the
// override metadata databases are cleared instead.  Always returns true
// (note: a failed Alloc simply leaves overrideData NULL).
bool DecodeOverrides(DECODER *decoder, unsigned char *overrideData, int overrideSize)
{
    // Release any override blob installed by a previous call
    if(decoder->overrideData)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->overrideData);
#else
        MEMORY_FREE(decoder->overrideData);
#endif
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }

    if(overrideSize)
    {
        // Copy the caller's blob into decoder-owned memory
#if _ALLOCATOR
        decoder->overrideData = Alloc(decoder->allocator, overrideSize);
#else
        decoder->overrideData = MEMORY_ALLOC(overrideSize);
#endif
        if(decoder->overrideData)
        {
            memcpy(decoder->overrideData, overrideData, overrideSize);
            decoder->overrideSize = overrideSize;
        }
    }
    else
    {
        int i;
        for(i=METADATA_PRIORITY_OVERRIDE; i<=METADATA_PRIORITY_MAX; i++) //This was 0 to max but that cause right eye primary corrections(side-by-side) mode to flicker.
        // This database clearing was added but I don't know why.
        {
            if(decoder->DataBases[i])
            {
#if _ALLOCATOR
                Free(decoder->allocator, decoder->DataBases[i]);
#else
                MEMORY_FREE(decoder->DataBases[i]);
#endif
                decoder->DataBases[i] = NULL;
                decoder->DataBasesSize[i] = 0;
                decoder->DataBasesAllocSize[i] = 0;
            }
        }
    }

    return true;
}
// Return the transform for the given channel of the group, allocating a
// zero-initialized TRANSFORM on first use.  Returns NULL if the channel
// number is out of range or allocation fails.
//
// NOTE(review): in the _ALLOCATOR build this routine passes a NULL
// allocator to Alloc (see TODO below) -- confirm Alloc handles that.
TRANSFORM *AllocGroupTransform(GROUP *group, int channel)
{
#if _ALLOCATOR
    //TODO:ALLOC Change this routine to take an allocator as the first argument
    ALLOCATOR *allocator = NULL;
#endif
    TRANSFORM *transform;

    // Channel zero is a special case because it may mean
    // that the group header has not been decoded yet
    if (channel != 0)
    {
        // Make sure that the channel number is in range
        assert(0 <= channel && channel < group->header.num_channels);
        if (!(0 <= channel && channel < group->header.num_channels))
            return NULL;
    }

    transform = group->transform[channel];

    // Need to allocate a transform data structure?
    if (transform == NULL) {
#if _ALLOCATOR
        transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
        transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
        assert(transform != NULL);
        if (transform == NULL) return NULL;
        memset(transform, 0, sizeof(TRANSFORM));
        group->transform[channel] = transform;
#if _TIMING
        alloc_transform_count++;
#endif
    }

    return transform;
}
//extern FILE *logfile;
// Fill the output buffer with black (or zero) for the specified format.
//
// Bug fix: the previous implementation passed the four-byte fill pattern
// to memset, but memset replicates only the LOW byte of its fill argument
// (C standard: the value is converted to unsigned char).  For YUYV, whose
// pattern alternates luma and chroma bytes, that wrote the wrong values
// into the chroma positions.  The pattern is now written explicitly.
//
// NOTE(review): size is computed as height * pitch -- assumes pitch is
// non-negative (no bottom-up row order); confirm against callers.
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format)
{
    size_t size = height * pitch;
    size_t i;

    // Four-byte fill pattern for one group of output bytes
    union
    {
        uint8_t byte[4];
        uint32_t word;
    } output;

    switch (format)
    {
    case DECODED_FORMAT_YUYV:
        // Black luma with neutral chroma
        output.byte[0] = COLOR_LUMA_BLACK;
        output.byte[1] = COLOR_CHROMA_ZERO;
        output.byte[2] = COLOR_LUMA_BLACK;
        output.byte[3] = COLOR_CHROMA_ZERO;
        break;

    default:
        //if (logfile) fprintf(logfile,"**Unknown format: %d\n", format);
        //assert(0);
        output.word = 0;
        break;
    }

    if (output.byte[0] == output.byte[1] &&
        output.byte[0] == output.byte[2] &&
        output.byte[0] == output.byte[3])
    {
        // Uniform pattern: plain memset is correct (and fastest)
        memset(buffer, output.byte[0], size);
    }
    else
    {
        // memset cannot replicate a multi-byte pattern; write it explicitly
        for (i = 0; i < size; i++) {
            buffer[i] = output.byte[i & 3];
        }
    }
}
// Decode the coefficients in a subband
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband);
// Decode the coefficients in a lowpass band
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet);
// Decode the coefficients in a highpass band
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading);
// Decode an empty band
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band);
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height);
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height);
// Decode a sample channel header
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input);
// Apply the inverse horizontal-temporal transform to reconstruct the output frame
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
#if 0
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
uint8_t *frame1, uint8_t *frame2, int output_pitch,
FRAME_INFO *info, char *buffer, size_t buffer_size);
#else
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
int frame_index, uint8_t *output, int output_pitch,
FRAME_INFO *info, const SCRATCH *scratch, int precision);
#endif
// Copy the quarter resolution lowpass channels from the spatial transform
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision);
// Convert the quarter resolution lowpass channels to the specified output format
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision);
// Routines for converting the new encoded formats to the requested output format
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameRGBA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameYUVA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// The first Bayer routine calls the other Bayer routines for the decoded resolution
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
// New code for handling the original YUV 4:2:2 encoded format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// Return true if the rest of the channel does not have to be decoded
//
// Decides, based on the transform type and the requested output
// resolution, whether enough subbands have already been decoded to
// reconstruct the output -- in which case the remaining channel data can
// be skipped.  Bayer-encoded sources never skip at half/quarter
// resolution; YUV outputs skip the 4th (alpha) channel entirely.
//
// NOTE(review): the two branches below differ only in the mask values
// (macros for FIELDPLUS vs. local constants 0x7F/0x0F for SPATIAL);
// consolidation would need the macro definitions to confirm equivalence.
static bool CanSkipChannel(DECODER *decoder, int resolution)
{
    CODEC_STATE *codec = &decoder->codec;
    int channel = codec->channel;
    TRANSFORM *transform = decoder->transform[channel];
    int transform_type = transform->type;

    // Can the rest of the channel be skipped?
    if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
    {
        switch (resolution)
        {
        case DECODED_RESOLUTION_HALF:
            if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
                return ((codec->decoded_subband_flags & DECODED_SUBBAND_MASK_HALF) == DECODED_SUBBAND_MASK_HALF);
            break;

        case DECODED_RESOLUTION_QUARTER:
            if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
                return ((codec->decoded_subband_flags & DECODED_SUBBAND_MASK_QUARTER) == DECODED_SUBBAND_MASK_QUARTER);
            break;

        case DECODED_RESOLUTION_LOWPASS_ONLY:
            // Only the lowpass band (bit zero) is required
            return (codec->decoded_subband_flags & 1);
            break;

        default:
            if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
            {
                if(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
                {
                    // If we are requesting a YUV decode we don't need the 4th channel
                    if(codec->channel == 3)
                    {
                        return true;
                    }
                }
            }
            break;
        }
    }
    else
    {
        const uint32_t decoded_subband_mask_half = 0x7F;
        const uint32_t decoded_subband_mask_quarter = 0x0F;

        assert(transform_type == TRANSFORM_TYPE_SPATIAL);

        switch (resolution)
        {
        case DECODED_RESOLUTION_HALF:
            if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
                return ((codec->decoded_subband_flags & decoded_subband_mask_half) == decoded_subband_mask_half);
            break;

        case DECODED_RESOLUTION_QUARTER:
            if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
                return ((codec->decoded_subband_flags & decoded_subband_mask_quarter) == decoded_subband_mask_quarter);
            break;

        case DECODED_RESOLUTION_LOWPASS_ONLY:
            // Only the lowpass band (bit zero) is required
            return (codec->decoded_subband_flags & 1);
            break;

        default:
            if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
            {
                if(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
                {
                    // If we are requesting a YUV decode we don't need the 4th channel
                    if(codec->channel == 3)
                    {
                        return true;
                    }
                }
            }
            break;
        }
    }

    // Cannot skip the rest of the channel
    return false;
}
#if 0
// Disabled: returns true if the given subband can be skipped entirely at
// the current decoded resolution.  Kept for reference.
static bool CanSkipSubband(DECODER *decoder, int subband)
{
    // Bitmask indicates which subbands must be decoded for quarter resolution
    static uint32_t quarter_resolution_mask = 0x008F;

    // Convert the subband number into a bitmask (could use a lookup table)
    uint32_t subband_mask = SUBBAND_MASK(subband);

    // Select the resolution of the fully decoded frames
    int resolution = decoder->frame.resolution;

    switch (resolution)
    {
    case DECODED_RESOLUTION_QUARTER:
        //if (4 <= subband && subband <= 6)
        // Bayer-encoded sources never skip subbands
        if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
        {
            if ((subband_mask & quarter_resolution_mask) == 0) {
                return true;
            }
        }
        break;

    default:
        // Assume that the subband must be decoded
        break;
    }

    return false;
}
#endif
// Return true if the wavelet exists and every one of its bands is valid
static bool AllBandsValid(IMAGE *wavelet)
{
    // A missing wavelet counts as not valid
    if (wavelet == NULL) {
        return false;
    }
    return BANDS_ALL_VALID(wavelet);
}
#if DEBUG
// Return true when, for the given frame, every band of every channel's
// wavelet is valid.  Out-of-range arguments assert (debug) and fail.
static bool AllTransformBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
    int ch;

    // Validate the channel count
    if (num_channels < 1 || num_channels > TRANSFORM_MAX_CHANNELS) {
        assert(0);
        return false;
    }

    // Validate the frame index
    if (frame_index < 0 || frame_index >= TRANSFORM_MAX_FRAMES) {
        assert(0);
        return false;
    }

    for (ch = 0; ch < num_channels; ch++)
    {
        if (!AllBandsValid(transform_array[ch]->wavelet[frame_index])) {
            return false;
        }
    }

    // All wavelet bands in all channels are valid
    return true;
}
// Return true when, for the given frame, the lowpass band (band zero) of
// every channel's wavelet is valid.  Out-of-range arguments fail quietly.
static bool AllLowpassBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
    int ch;

    if (num_channels <= 0 || num_channels > TRANSFORM_MAX_CHANNELS) {
        return false;
    }
    if (frame_index < 0 || frame_index >= TRANSFORM_MAX_FRAMES) {
        return false;
    }

    for (ch = 0; ch < num_channels; ch++)
    {
        IMAGE *lowpass = transform_array[ch]->wavelet[frame_index];

        // Only band zero needs to be valid in each channel
        if (lowpass == NULL) {
            return false;
        }
        if ((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0) {
            return false;
        }
    }

    // All lowpass bands in all channels are valid
    return true;
}
#endif
// Derive the full frame dimensions from the dimensions of the first
// wavelet.  Both supported transform types expand each dimension by the
// same factor of eight; an unknown type asserts (debug) and fails without
// writing the outputs.
static bool
ComputeFrameDimensionsFromFirstWavelet(int transform_type,
                                       int first_wavelet_width,
                                       int first_wavelet_height,
                                       int *frame_width_out,
                                       int *frame_height_out)
{
    const int expansion = 8;

    switch (transform_type)
    {
    case TRANSFORM_TYPE_SPATIAL:
    case TRANSFORM_TYPE_FIELDPLUS:
        // Each dimension grows by the expansion factor
        *frame_width_out = first_wavelet_width * expansion;
        *frame_height_out = first_wavelet_height * expansion;
        return true;

    default:
        assert(0);
        return false;
    }
}
// Decode the sample header to determine the type of sample and other parameters.
// On entry, header->videoChannels and header->find_lowpass_bands act as caller
// inputs (parse depth / channel selection); the header is then cleared and
// refilled from the bitstream.  Returns true when the width, height and format
// information was parsed completely and correctly.
bool ParseSampleHeader(BITSTREAM *input, SAMPLE_HEADER *header)
{
    TAGVALUE segment;
    int sample_type;
    int sample_size = 0;

    // Group index
    uint32_t channel_size[TRANSFORM_MAX_CHANNELS];

    // Number of channels in the group index
    // FIX: initialize to zero -- channel_count is read below (RGBA_4444 fixup and
    // GetEncodedFormat) even when the bitstream contains no CODEC_TAG_INDEX tag,
    // which previously read an indeterminate value (undefined behavior).
    int channel_count = 0;

    // Values used for computing the frame width and height (if necessary)
    int transform_type = -1;
    int first_wavelet_width = 0;
    int first_wavelet_height = 0;
    int display_height = 0;
    int current_channel = 0;
    int currentVideoChannel;
    int find_lowpass_bands;
    int find_uncompressed;
    int find_header_info_only;

    // FIX: validate the output argument before dereferencing it (the NULL check
    // previously followed the reads of videoChannels/find_lowpass_bands below).
    if (header == NULL) {
        return false;
    }

    // These fields are caller-supplied inputs; capture them before the header is cleared
    currentVideoChannel = header->videoChannels;
    find_lowpass_bands = header->find_lowpass_bands & 1;
    find_uncompressed = header->find_lowpass_bands & 2 ? 1 : 0;
    find_header_info_only = header->find_lowpass_bands & 4 ? 1 : 0;

    if (currentVideoChannel == 0)
        currentVideoChannel = 1;

    // Clear the entire sample header to prevent early return from this routine
    memset(header, 0, sizeof(SAMPLE_HEADER));

    // Clear the error code
    header->error = CODEC_ERROR_OKAY;

    // Initialize the frame dimensions to unknown
    header->width = 0;
    header->height = 0;
    header->videoChannels = 1;

    // Initialize the original pixel format to unknown
    header->input_format = COLOR_FORMAT_UNKNOWN;

    // Initialize the encoded format to unknown
    header->encoded_format = ENCODED_FORMAT_UNKNOWN;

    // Clear the frame number in case it is not present in the sample
    header->frame_number = 0;

    // The video is not progressive if the sample flags are not present
    header->hdr_progressive = false;

#if _BITSTREAM_UNALIGNED
    // Record the alignment of the bitstream within the sample
    SetBitstreamAlignment(input, 0);
#endif

    sample_size = input->nWordsUsed;

    // Get the type of sample (should be the first tag value pair)
    segment = GetTagValue(input);
    assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
    if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
        header->error = CodecErrorBitstream(input);
        return false;
    }
    sample_type = segment.tuple.value;

    switch (sample_type)
    {
    case SAMPLE_TYPE_GROUP:     // Group of frames
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = false;
        break;

    case SAMPLE_TYPE_FRAME:     // The second or later frame in a group
        header->key_frame = false;
        header->difference_frame = true;
        header->droppable_frame = true;
        break;

    case SAMPLE_TYPE_IFRAME:    // One frame in the group
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = true;
        break;

    case SAMPLE_TYPE_SEQUENCE_HEADER:
        // Treat the video sequence header like a keyframe that can be dropped
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = true;
        break;

    default:
        // Unknown type of sample
        header->error = CODEC_ERROR_SAMPLE_TYPE;
        return false;
    }

    // Continue parsing the sample header until all of the information has been found
    while ( (find_lowpass_bands == 1 && current_channel < 3) || //parse all
            (find_uncompressed == 1 && current_channel < 1) ||
            display_height == 0 ||
            header->width == 0 ||
            header->height == 0 ||
            header->input_format == COLOR_FORMAT_UNKNOWN ||
            header->frame_number == 0 ||
            (header->interlaced_flags == 0 && header->hdr_progressive == 0))
    {
        int chunksize = 0;

        // Get the next tag value pair from the bitstream
        segment = GetSegment(input);

        // Did the bitstream end before the last tag was found?
        if (input->error == BITSTREAM_ERROR_UNDERFLOW) {
            break;
        }

        // Did an error occur while reading the bitstream?
        if (input->error != BITSTREAM_ERROR_OKAY) {
            header->error = CodecErrorBitstream(input);
            return false;
        }

        // Is this an optional tag?
        if (segment.tuple.tag < 0) {
            segment.tuple.tag = NEG(segment.tuple.tag);
        }

        // Sized chunks carry a 24-bit payload size: low 16 bits in the value,
        // and (for 0x2000-class tags) the high 8 bits in the tag byte.
        if (segment.tuple.tag & 0x2000)
        {
            chunksize = segment.tuple.value;
            chunksize &= 0xffff;
            chunksize += ((segment.tuple.tag & 0xff) << 16);
        }
        else if (segment.tuple.tag & 0x4000)
        {
            chunksize = segment.tuple.value;
            chunksize &= 0xffff;
        }
        // else if(tag == CODEC_TAG_INDEX) // handled below
        // {
        //     chunksize = value;
        //     chunksize &= 0xffff;
        // }
        else
        {
            chunksize = 0;
        }

        if ((int)(segment.tuple.tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || segment.tuple.tag & 0x6000)
        {
            int skip = 1;

            if ((segment.tuple.tag & 0xff00) == 0x2200) //sample size
            {
                if (sample_size < chunksize*4)
                    find_header_info_only = 1;

                skip = find_header_info_only;

                // Stereo sample: locate the second-eye sub-sample and parse its
                // header recursively so the thumbnail offsets of both eyes are known.
                if (currentVideoChannel <= 1 && header->videoChannels == 2 && !find_header_info_only)
                {
                    BITSTREAM input2;
                    SAMPLE_HEADER header2;
                    BITWORD *eye2 = (BITWORD *)(input->lpCurrentWord + chunksize*4);
                    int eye_offset = sample_size - input->nWordsUsed + chunksize*4; //approx
                    int eye_sample_size = input->nWordsUsed - eye_offset;

                    // Search for first sample of the next frame
                    while ((eye2[1] != (uint8_t)CODEC_TAG_SAMPLE || eye2[0] != 0 || eye2[2] != 0) && eye_sample_size > 0)
                    {
                        eye2 += 4;
                        chunksize ++;
                        eye_offset += 4;
                        eye_sample_size -= 4;
                    }

                    // Save the offset to the right stereo sample
                    header->left_sample_size = eye_offset;

                    {
                        InitBitstreamBuffer(&input2, eye2, eye_sample_size, BITSTREAM_ACCESS_READ);

                        memset(&header2, 0, sizeof(SAMPLE_HEADER));
                        header2.find_lowpass_bands = 1;
                        currentVideoChannel++;
                        header2.videoChannels = currentVideoChannel;
                        if (ParseSampleHeader(&input2, &header2))
                        {
                            int i;
                            for (i = 0; i < 4; i++)
                            {
                                if (header2.thumbnail_channel_offsets[i])
                                    header->thumbnail_channel_offsets_2nd_Eye[i] = eye_offset + header2.thumbnail_channel_offsets[i];
                            }
                        }
                    }
                }
            }

            if ((segment.tuple.tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                header->hdr_uncompressed = 1;
                skip = 1;
                if (find_lowpass_bands != 1)
                    break;
            }

            if ((segment.tuple.tag & 0xff00) == 0x2100) //level
            {
                if (find_lowpass_bands == 1)
                {
                    skip = 0;
                }
                else
                {
                    skip = 1; // no header data after the fix level
                    break;
                }
            }

            if (chunksize)
            {
                if (skip)
                {
                    // Skip the chunk payload (chunk size is in 32-bit words)
                    input->lpCurrentWord += chunksize*4;
                    input->nWordsUsed -= chunksize*4;
                }
            }
            else
            {
                switch (segment.tuple.tag)
                {
                case CODEC_TAG_VERSION: // Version number of the encoder used in each GOP.
                    header->encoder_version = (((segment.tuple.value>>12) & 0xf)<<16) |
                                              (((segment.tuple.value>>8) & 0xf)<<8) |
                                              ((segment.tuple.value) & 0xff);
                    break;

                case CODEC_TAG_INDEX:
                    // Get the number of channels in the index to skip
                    channel_count = segment.tuple.value;
                    DecodeGroupIndex( input, (uint32_t*)&channel_size[ 0 ], min( channel_count, TRANSFORM_MAX_CHANNELS ) );
                    break;

                case CODEC_TAG_FRAME_WIDTH:
                    // Record the frame width in the sample header
                    header->width = segment.tuple.value;
                    break;

                case CODEC_TAG_FRAME_HEIGHT:
                    // Record the frame height in the sample header
                    header->height = segment.tuple.value;
                    break;

                case CODEC_TAG_FRAME_DISPLAY_HEIGHT:
                    display_height = segment.tuple.value;
                    break;

                case CODEC_TAG_LOWPASS_WIDTH:
                    // Save the width of the smallest wavelet for computing the frame dimensions
                    first_wavelet_width = segment.tuple.value;
                    break;

                case CODEC_TAG_LOWPASS_HEIGHT:
                    // Save the height of the smallest wavelet for computing the frame dimensions
                    first_wavelet_height = segment.tuple.value;
                    break;

                case CODEC_TAG_TRANSFORM_TYPE:
                    // Save the type of transform for computing the frame dimensions (if necessary)
                    transform_type = segment.tuple.value;
                    break;

                case CODEC_TAG_INPUT_FORMAT:
                    // Record the original format of the encoded frames
                    header->input_format = (COLOR_FORMAT)segment.tuple.value;
                    break;

                case CODEC_TAG_ENCODED_FORMAT:
                case CODEC_TAG_OLD_ENCODED_FORMAT:
                    // Record the encoded format (internal representation)
                    header->encoded_format = (ENCODED_FORMAT)segment.tuple.value;
                    // Three channels cannot carry an alpha plane; demote to RGB 4:4:4
                    if (header->encoded_format == ENCODED_FORMAT_RGBA_4444 && channel_count == 3)
                        header->encoded_format = ENCODED_FORMAT_RGB_444;
                    break;

                case CODEC_TAG_FRAME_NUMBER:
                    // Record the frame number for debugging
                    header->frame_number = segment.tuple.value;
                    break;

                case CODEC_TAG_INTERLACED_FLAGS:
                    // Record the flags that indicate the field type
                    header->interlaced_flags = segment.tuple.value;
                    break;

                case CODEC_TAG_SAMPLE_FLAGS:
                    // The sample flags specify progressive versus interlaced decoding
                    header->hdr_progressive = !!(segment.tuple.value & SAMPLE_FLAGS_PROGRESSIVE);
                    if (header->hdr_progressive) {
                        // Clear the interlaced flags
                        header->interlaced_flags = 0;
                    }
                    break;

                case CODEC_TAG_LOWPASS_SUBBAND:
                    if (segment.tuple.value == 0) // low pass band
                    {
                        // Scan ahead for the lowpass band marker so the byte offset
                        // of each channel's thumbnail data can be recorded.
                        int count = 8;
                        uint32_t *lptr = (uint32_t *)input->lpCurrentWord;
                        do
                        {
                            uint32_t longword = SwapInt32(lptr[count]);
                            unsigned short t,v;
                            t = (longword>>16) & 0xffff;
                            v = (longword) & 0xffff;
                            if (t == CODEC_TAG_MARKER && IsLowPassBandMarker(v) && current_channel < 4)
                            {
                                header->thumbnail_channel_offsets[current_channel] = (sample_size - input->nWordsUsed) + count*4 + 4;
                                break;
                            }
                            count++;
                        } while (count < 32);
                        current_channel++;
                    }
                    break;

                case CODEC_TAG_ENCODED_CHANNELS:
                    if (header->videoChannels == 1)
                    {
                        header->videoChannels = segment.tuple.value;
                        if (header->videoChannels < 1)
                            header->videoChannels = 1;
                    }
                    break;

                case CODEC_TAG_QUALITY_L: // low 16 bits of the encode quality
                    header->encode_quality &= 0xffff0000;
                    header->encode_quality |= segment.tuple.value;
                    break;

                case CODEC_TAG_QUALITY_H: // high 16 bits of the encode quality
                    header->encode_quality &= 0xffff;
                    header->encode_quality |= segment.tuple.value<<16;
                    break;
                }

                // Have the encoded frame dimensions been computed?
                if (header->width == 0 || header->height == 0)
                {
                    // Found the first wavelet in the bitstream?
                    if (transform_type >= 0 && first_wavelet_width > 0 && first_wavelet_height > 0)
                    {
                        // The group header did not contain tags for the frame dimensions
                        // prior to the release of support for RGB 4:4:4, so must attempt to
                        // compute the frame dimensions from the dimensions of the lowpass band.
                        int frame_width = 0;
                        int frame_height = 0;

                        // Use the dimensions of the first wavelet to compute the frame width and height
                        if (!ComputeFrameDimensionsFromFirstWavelet(transform_type,
                                                                    first_wavelet_width,
                                                                    first_wavelet_height,
                                                                    &frame_width,
                                                                    &frame_height)) {
                            // Could not compute the frame dimensions
                            header->error = CODEC_ERROR_FRAME_DIMENSIONS;
                            return false;
                        }

                        // Save the frame dimensions in the sample header
                        header->width = frame_width;
                        header->height = frame_height;

                        // No more header information after finding the lowpass band
                        break;
                    }
                }

                if (find_lowpass_bands != 1 && find_uncompressed != 1)
                {
                    // No more header information after the first encoded band
                    if (segment.tuple.tag == CODEC_TAG_BAND_NUMBER)
                    {
                        // Stop looking for header information
                        break;
                    }

                    // No more header information after the frame index
                    if (segment.tuple.tag == CODEC_TAG_FRAME_INDEX)
                    {
                        // Stop looking for header information
                        break;
                    }

                    // No more header information after the lowpass band header
                    if (segment.tuple.tag == CODEC_TAG_PIXEL_DEPTH)
                    {
                        // Stop looking for header information
                        break;
                    }
                }
            }
        }
    }

    if (header->width == 0 || header->height == 0) {
        assert(0);
    }

    // Fill in the encoded format if it was not present in the header
    if (header->encoded_format == ENCODED_FORMAT_UNKNOWN) {
        header->encoded_format = GetEncodedFormat(header->input_format, header->encode_quality, channel_count);
    }

    if (display_height > 0) {
        header->height = display_height;
    }

    if (header->encoded_format == ENCODED_FORMAT_BAYER)
    {
        // Bayer samples encode at half resolution in each dimension
        header->width *= 2;
        header->height *= 2;
        if (display_height == 0)
        {
            // Trim the padded 1088-line Bayer frame to the standard 1080
            if (header->height == 1088)
                header->height = 1080;
        }
    }

    // Return true if the header was parsed completely and correctly
    return (header->width > 0 &&
            header->height > 0 &&
            ((sample_type == SAMPLE_TYPE_FRAME) ||
             (header->input_format != COLOR_FORMAT_UNKNOWN &&
              header->encoded_format != ENCODED_FORMAT_UNKNOWN)));

    // It is not an error if the frame number was not found in the sample header
}
// Print the tag-value pairs at the start of a sample to the log file,
// stopping as soon as either lowpass band dimension has been reported.
// Returns false on a bitstream read error, true otherwise.
bool DumpSampleHeader(BITSTREAM *input, FILE *logfile)
{
    int found_width = 0;
    int found_height = 0;

    // Parse the sample header until the lowpass band is found
    for (;;)
    {
        // Get the next tag value pair from the bitstream
        TAGVALUE pair = GetSegment(input);
        TAGWORD tag;
        TAGWORD value;

        // Did an error occur while reading the bitstream?
        if (input->error != BITSTREAM_ERROR_OKAY) {
            return false;
        }

        tag = pair.tuple.tag;
        value = pair.tuple.value;

        // Optional tags are stored negated
        if (tag < 0) {
            tag = NEG(tag);
        }

        // Check that the tag is valid
        assert(CODEC_TAG_ZERO < tag && tag <= CODEC_TAG_LAST_NON_SIZED);

        switch (tag)
        {
        case CODEC_TAG_SAMPLE:
            fprintf(logfile, "Sample type: %d\n", value);
            break;

        case CODEC_TAG_FRAME_WIDTH:
            fprintf(logfile, "Frame width: %d\n", value);
            break;

        case CODEC_TAG_FRAME_HEIGHT:
            fprintf(logfile, "Frame height: %d\n", value);
            break;

        case CODEC_TAG_LOWPASS_WIDTH:
            found_width = value;
            fprintf(logfile, "Lowpass width: %d\n", found_width);
            break;

        case CODEC_TAG_LOWPASS_HEIGHT:
            found_height = value;
            fprintf(logfile, "Lowpass height: %d\n", found_height);
            break;

        case CODEC_TAG_TRANSFORM_TYPE:
            fprintf(logfile, "Transform type: %d\n", value);
            break;

        case CODEC_TAG_INPUT_FORMAT:
            fprintf(logfile, "Input format: %d\n", value);
            break;

        case CODEC_TAG_ENCODED_FORMAT:
        case CODEC_TAG_OLD_ENCODED_FORMAT:
            fprintf(logfile, "Encoded format: %d\n", value);
            break;

        case CODEC_TAG_FRAME_NUMBER:
            fprintf(logfile, "Frame number: %d\n", value);
            break;
        }

        // Stop once either lowpass dimension has been seen
        if (found_width != 0 || found_height != 0) {
            break;
        }
    }

    return true;
}
// Skip ahead in the bitstream to the requested stereo (3D) video channel.
// Scans the first tuplets of the sample for CODEC_TAG_ENCODED_CHANNELS; if
// the sample is stereo and a channel beyond the first was requested, advances
// the bitstream past the first (left-eye) sub-sample to the start of the next
// sample.  Returns the number of encoded channels (or the last known count
// from the decoder when the sample is too small to contain the tag).
int SkipVideoChannel(DECODER *decoder, BITSTREAM *input, int skip_to_channel) // 3D work
{
    TAGWORD tag,value=1;
    unsigned char *pos = NULL;
    int readsize = input->nWordsUsed;
    if(readsize > 4096) // only need to scan the first few tuplets
    {
        readsize = 4096;
    }
    else
    {
        //Tiny therefore P-frame, nothing to be read so:
        value=decoder->real_channels; // return the last value.
        return value;
    }
    // Locate the ENCODED_CHANNELS tuplet within the scanned prefix; 'value'
    // receives the channel count when found
    pos = GetTupletAddr(input->lpCurrentBuffer, readsize, CODEC_TAG_ENCODED_CHANNELS, &value);
    if(pos && value>1 && skip_to_channel>1)
    {
        int chunksize = 0;
        intptr_t offset;
        int count = 0;
        // Walk forward tuplet-by-tuplet (big-endian 16-bit tag/value pairs)
        // until the sample-size chunk tag is found; give up after 10 tuplets
        do
        {
            tag = *pos++<<8;
            tag |= *pos++;
            value = *pos++<<8;
            value |= *pos++;
            if (tag < 0)
            {
                // Optional tags are stored negated
                tag = NEG(tag);
            }
        } while((tag & 0xff00) != CODEC_TAG_SAMPLE_SIZE && count++ < 10);
        if((tag & 0xff00) == CODEC_TAG_SAMPLE_SIZE)
        {
            // 24-bit chunk size: low 16 bits from the value, high 8 from the tag
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag&0xff)<<16);
            // Jump past the current (left-eye) chunk; chunk size is in 32-bit words
            offset = ((intptr_t)pos - (intptr_t)input->lpCurrentWord) + chunksize*4;
            input->lpCurrentWord += offset;
            input->nWordsUsed -= (int)offset;
            {
                // NOTE(review): this byte pointer shadows the outer TAGWORD 'tag'
                uint8_t *tag = (uint8_t *)input->lpCurrentWord;
                // Search for first sample of the next frame
                while((tag[1] != (uint8_t)CODEC_TAG_SAMPLE || tag[0] != 0 || tag[2] != 0) && input->nWordsUsed > 0)
                {
                    input->lpCurrentWord += 4;
                    input->nWordsUsed -= 4;
                    tag += 4;
                }
            }
        }
    }
    //if(value == 0) value = 1; // old non-stereo file
    return value;
}
// Number of subpixel phases per whole pixel for the 4-tap interpolator below.
#define SUBPIXEL 64

// 4-tap vertical interpolation weights, one row per 1/64-pixel phase
// (SUBPIXEL+1 = 65 rows so both end phases are present).  Each weight is the
// filter coefficient scaled by 128 into signed 16-bit range; rows sum to
// approximately 256*128 = 32768.
// NOTE(review): 0x7fff stands in for 256*128 = 32768, which would overflow a
// signed short -- presumably deliberate saturation; confirm before reuse.
static short gains[SUBPIXEL+1][4] = {
    {0*128,0*128,0x7fff,0*128},
    {0*128,2*128,0x7fff,-2*128},
    {0*128,5*128,255*128,-4*128},
    {0*128,8*128,254*128,-6*128},
    {0*128,11*128,253*128,-8*128},
    {0*128,14*128,252*128,-10*128},
    {0*128,18*128,250*128,-12*128},
    {0*128,21*128,248*128,-13*128},
    {-1*128,25*128,247*128,-15*128},
    {-1*128,29*128,244*128,-16*128},
    {-1*128,33*128,241*128,-17*128},
    {-2*128,37*128,239*128,-18*128},
    {-2*128,41*128,236*128,-19*128},
    {-3*128,46*128,233*128,-20*128},
    {-3*128,50*128,229*128,-20*128},
    {-4*128,55*128,226*128,-21*128},
    {-4*128,60*128,221*128,-21*128},
    {-5*128,65*128,217*128,-21*128},
    {-5*128,70*128,213*128,-22*128},
    {-6*128,75*128,209*128,-22*128},
    {-7*128,80*128,205*128,-22*128},
    {-7*128,85*128,199*128,-21*128},
    {-8*128,91*128,194*128,-21*128},
    {-9*128,96*128,190*128,-21*128},
    {-10*128,102*128,185*128,-21*128},
    {-10*128,107*128,179*128,-20*128},
    {-11*128,113*128,174*128,-20*128},
    {-12*128,118*128,169*128,-19*128},
    {-13*128,124*128,164*128,-19*128},
    {-14*128,129*128,159*128,-18*128},
    {-14*128,135*128,152*128,-17*128},
    {-15*128,141*128,147*128,-17*128},
    // Midpoint phase (exactly halfway between two source pixels)
    {-16*128,144*128,144*128,-16*128},
    {-17*128,147*128,141*128,-15*128},
    {-17*128,152*128,135*128,-14*128},
    {-18*128,159*128,129*128,-14*128},
    {-19*128,164*128,124*128,-13*128},
    {-19*128,169*128,118*128,-12*128},
    {-20*128,174*128,113*128,-11*128},
    {-20*128,179*128,107*128,-10*128},
    {-21*128,185*128,102*128,-10*128},
    {-21*128,190*128,96*128,-9*128},
    {-21*128,194*128,91*128,-8*128},
    {-21*128,199*128,85*128,-7*128},
    {-22*128,205*128,80*128,-7*128},
    {-22*128,209*128,75*128,-6*128},
    {-22*128,213*128,70*128,-5*128},
    {-21*128,217*128,65*128,-5*128},
    {-21*128,221*128,60*128,-4*128},
    {-21*128,226*128,55*128,-4*128},
    {-20*128,229*128,50*128,-3*128},
    {-20*128,233*128,46*128,-3*128},
    {-19*128,236*128,41*128,-2*128},
    {-18*128,239*128,37*128,-2*128},
    {-17*128,241*128,33*128,-1*128},
    {-16*128,244*128,29*128,-1*128},
    {-15*128,247*128,25*128,-1*128},
    {-13*128,248*128,21*128,0*128},
    {-12*128,250*128,18*128,0*128},
    {-10*128,252*128,14*128,0*128},
    {-8*128,253*128,11*128,0*128},
    {-6*128,254*128,8*128,0*128},
    {-4*128,255*128,5*128,0*128},
    {-2*128,0x7fff,2*128,0*128},
    {0*128,0*128,0x7fff,0*128}
};
// Windowed-sinc (Lanczos-style) resampling kernel: 256 entries covering
// 4 taps x 64 subpixel phases (indexed as lanczos[rmdr], rmdr += 64 per tap
// in RGB48VerticalShiftZoom/...Fine).  Q15 fixed point: zero crossings at
// indices 0, 64 and 192, peak 32767 at index 128.
// NOTE(review): the table is mirror-symmetric (lanczos[k] == lanczos[256-k])
// everywhere except index 96 (18781) vs index 160 (18159); both halves also
// contain a ~90-count step amid an otherwise ~715-count progression there.
// Looks like a data-entry error -- verify against the original filter design
// before changing any values.
static int lanczos[256] =
{
    // Indices 0..63: leading negative lobe
    0, -2, -8, -18, -33, -53, -77, -106,
    -141, -179, -223, -272, -325, -384, -447, -514,
    -586, -662, -742, -826, -913, -1004, -1097, -1193,
    -1290, -1389, -1490, -1591, -1692, -1792, -1892, -1990,
    -2086, -2179, -2269, -2355, -2436, -2511, -2580, -2643,
    -2697, -2744, -2781, -2809, -2826, -2832, -2826, -2808,
    -2776, -2730, -2670, -2594, -2503, -2395, -2271, -2129,
    -1969, -1790, -1593, -1377, -1141, -886, -611, -315,
    // Indices 64..127: rising main lobe
    0, 336, 692, 1069, 1466, 1884, 2321, 2778,
    3255, 3750, 4265, 4797, 5347, 5914, 6498, 7097,
    7711, 8340, 8982, 9636, 10301, 10977, 11663, 12357,
    13058, 13765, 14477, 15192, 15910, 16630, 17349, 18066,
    18781, 18871, 19580, 20285, 20986, 21678, 22361, 23035, // index 96/97: see NOTE above
    23697, 24348, 24983, 25604, 26206, 26790, 27354, 27898,
    28419, 28915, 29387, 29832, 30249, 30638, 30997, 31326,
    31623, 31886, 32117, 32314, 32476, 32603, 32695, 32749,
    // Index 128: peak
    32767, //was 32768, issue for SSE2
    // Indices 129..192: falling main lobe
    32749, 32695, 32603, 32476, 32314, 32117, 31886, 31623,
    31326, 30997, 30638, 30249, 29832, 29387, 28915, 28419,
    27898, 27354, 26790, 26206, 25604, 24983, 24348, 23697,
    23035, 22361, 21678, 20986, 20285, 19580, 18871, 18159, // index 159/160: see NOTE above
    18066, 17349, 16630, 15910, 15192, 14477, 13765, 13058,
    12357, 11663, 10977, 10301, 9636, 8982, 8340, 7711,
    7097, 6498, 5914, 5347, 4797, 4265, 3750, 3255,
    2778, 2321, 1884, 1466, 1069, 692, 336, 0,
    // Indices 193..255: trailing negative lobe
    -315, -611, -886, -1141, -1377, -1593, -1790, -1969,
    -2129, -2271, -2395, -2503, -2594, -2670, -2730, -2776,
    -2808, -2826, -2832, -2826, -2809, -2781, -2744, -2697,
    -2643, -2580, -2511, -2436, -2355, -2269, -2179, -2086,
    -1990, -1892, -1792, -1692, -1591, -1490, -1389, -1290,
    -1193, -1097, -1004, -913, -826, -742, -662, -586,
    -514, -447, -384, -325, -272, -223, -179, -141,
    -106, -77, -53, -33, -18, -8, -2,
};
// Vertically shift and zoom an image in place using a 4-tap subpixel filter.
//   RGB48      - image buffer, modified in place (layout per decoder->StereoBufferFormat)
//   buffer     - caller-supplied scratch area that receives a copy of the source rows
//   widthbytes - number of bytes per row to process
//   height     - number of rows
//   pitch      - byte stride between rows (may be negative for bottom-up images)
//   offset     - vertical shift as a fraction of the frame height
//   zoom       - vertical scale factor (source advances 1/zoom rows per output row)
// The relevant source rows are copied into 'buffer' first (since output
// overwrites the input), then each output row is a 4-tap weighted sum of
// neighboring source rows using the 'lanczos' kernel, computed with SSE2.
void RGB48VerticalShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
    int widthbytes, int height, int pitch, float offset,
    float zoom)
{
    float yposf,ystepf;
    int x;
    //int endofSSEline = 0;
    unsigned short *scanline[4];    // the four source rows feeding the current output row
    //int spitch = pitch/2;
    int neg = 0,step;               // NOTE(review): 'neg' is set below but never read
    __m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
    __m128i *lineA, *lineB, *lineC, *lineD, *outline128;

    offset = -offset;
    yposf = height * offset;        // dead store: overwritten by the next assignment
    // Starting source row, centering the zoom on the middle of the image
    yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
    ystepf = 1.0f/zoom;             // source rows advanced per output row
    if(yposf < 0.0)
        neg = 1;
    if(pitch < 0)
        yposf -= ystepf;
    /*  yposi = floor(yposf);
        remainf = yposf - (float)yposi;
        tablepos = (remainf*(float)SUBPIXEL);
        yposi = abs(yposi);
        if(yposi==0 && tablepos == 0)
            return; // no move required
    */
    // -3 , 0 best small notch at zero?
    //
    // SSE loop stride in bytes: 16 for 8-bit formats, 32 for 16-bit formats
    // (each iteration consumes two 128-bit loads in the 16-bit paths)
    switch(decoder->StereoBufferFormat)
    {
    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_YUYV:
        step = 16;
        break;
    case DECODED_FORMAT_W13A:
    case DECODED_FORMAT_RG64:
    case DECODED_FORMAT_WP13:
    case DECODED_FORMAT_RG48:
    default:
        step = 32;
        break;
    }
    {
        // Substitute row for taps that fall outside the image.
        // NOTE(review): only 1024 bytes -- rows wider than 1024 bytes would read
        // past the end of zeroline when an edge tap is used; TODO confirm bound.
        static char zeroline[1024] = {0};
        // Copy the source rows that the filter will read into the scratch buffer
        int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
        unsigned char *src = (unsigned char *)RGB48;
        unsigned char *dst = (unsigned char *)RGB48;
        unsigned char *ptr = (unsigned char *)buffer;
        if(yoffset < 0) yoffset = 0;
        if(yend > height) yend = height;
        src += pitch * yoffset;
        for(y=yoffset; y<yend; y++)
        {
            memcpy(ptr, src, widthbytes);
            ptr += widthbytes;
            src += pitch;
        }
        ptr = (unsigned char *)buffer;
        // Synthesize each output row
        for(y=0;y<height; y++)
        {
            int i,t,yp = ((int)yposf);
            // Subpixel phase within the kernel (0..63), inverted
            int rmdr = 63-((int)(yposf*64.0) & 63);
            // Per-tap kernel weights for this row
            // NOTE(review): shadows the file-scope 'gains' coefficient table
            int gains[4];
            yp -= 1; // use -2 cause a image down shift //DAN20100225
            t = 0;
            // Select the four source rows and their lanczos weights; taps that
            // fall outside the image use the zero line
            for(i=0; i<4; i++)
            {
                if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
                {
                    t += gains[i] = lanczos[rmdr];
                    scanline[i] = (unsigned short *)zeroline;
                }
                else
                {
                    t += gains[i] = lanczos[rmdr];
                    scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
                }
                yp++;
                rmdr+=64;
            }
            if(t)
            {
                __m128i half;
                // Broadcast the four tap weights
                gA = _mm_set1_epi16(gains[0]);
                gB = _mm_set1_epi16(gains[1]);
                gC = _mm_set1_epi16(gains[2]);
                gD = _mm_set1_epi16(gains[3]);
                outline128 = (__m128i *)dst;
                lineA = (__m128i *)scanline[0];
                lineB = (__m128i *)scanline[1];
                lineC = (__m128i *)scanline[2];
                lineD = (__m128i *)scanline[3];
                switch(decoder->StereoBufferFormat)
                {
                case DECODED_FORMAT_W13A:
                case DECODED_FORMAT_WP13:
                    // Signed 13-bit white point formats: weighted sum via
                    // mulhi (keeps high 16 bits of the 32-bit product),
                    // saturating clamp to 0x3fff, then <<1 back to range
                    for(x=0;x<widthbytes; x+=step)
                    {
                        lA = _mm_loadu_si128(lineA++);
                        lB = _mm_loadu_si128(lineB++);
                        lC = _mm_loadu_si128(lineC++);
                        lD = _mm_loadu_si128(lineD++);
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);
                        // upper limit to 32767
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_slli_epi16(o128,1);
                        _mm_storeu_si128(outline128++, o128);
                        // Second 128-bit group of this 32-byte step
                        lA = _mm_loadu_si128(lineA++);
                        lB = _mm_loadu_si128(lineB++);
                        lC = _mm_loadu_si128(lineC++);
                        lD = _mm_loadu_si128(lineD++);
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);
                        // upper limit to 32767
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_slli_epi16(o128,1);
                        _mm_storeu_si128(outline128++, o128);
                    }
                    break;
                case DECODED_FORMAT_RG64:
                case DECODED_FORMAT_RG48:
                    // Unsigned 16-bit formats: pre-shift to 13-bit so the
                    // signed multiplies cannot overflow, clamp to 0x0fff,
                    // then <<4 to restore 16-bit range
                    for(x=0;x<widthbytes; x+=step)
                    {
                        lA = _mm_loadu_si128(lineA++);
                        lA = _mm_srli_epi16(lA,3); //13-bit unsigned
                        lB = _mm_loadu_si128(lineB++);
                        lB = _mm_srli_epi16(lB,3); //13-bit unsigned
                        lC = _mm_loadu_si128(lineC++);
                        lC = _mm_srli_epi16(lC,3); //13-bit unsigned
                        lD = _mm_loadu_si128(lineD++);
                        lD = _mm_srli_epi16(lD,3); //13-bit unsigned
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_slli_epi16(o128,4);
                        _mm_storeu_si128(outline128++, o128);
                        // Second 128-bit group of this 32-byte step
                        lA = _mm_loadu_si128(lineA++);
                        lA = _mm_srli_epi16(lA,3); //13-bit unsigned
                        lB = _mm_loadu_si128(lineB++);
                        lB = _mm_srli_epi16(lB,3); //13-bit unsigned
                        lC = _mm_loadu_si128(lineC++);
                        lC = _mm_srli_epi16(lC,3); //13-bit unsigned
                        lD = _mm_loadu_si128(lineD++);
                        lD = _mm_srli_epi16(lD,3); //13-bit unsigned
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_slli_epi16(o128,4);
                        _mm_storeu_si128(outline128++, o128);
                    }
                    break;
                case DECODED_FORMAT_RGB32:
                case DECODED_FORMAT_RGB24:
                case DECODED_FORMAT_YUYV:
                    // 8-bit formats: the same 16 source bytes are widened twice
                    // (high half then low half), filtered in 16-bit precision,
                    // and packed back to bytes
                    for(x=0;x<widthbytes; x+=step)
                    {
                        lA = _mm_loadu_si128(lineA);
                        lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
                        lB = _mm_loadu_si128(lineB);
                        lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
                        lC = _mm_loadu_si128(lineC);
                        lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
                        lD = _mm_loadu_si128(lineD);
                        lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
                        lA = _mm_srli_epi16(lA,3); //13-bit unsigned
                        lB = _mm_srli_epi16(lB,3); //13-bit unsigned
                        lC = _mm_srli_epi16(lC,3); //13-bit unsigned
                        lD = _mm_srli_epi16(lD,3); //13-bit unsigned
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_slli_epi16(o128,4);
                        half = o128;    // keep the filtered high half for the final pack
                        lA = _mm_loadu_si128(lineA++);
                        lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
                        lB = _mm_loadu_si128(lineB++);
                        lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
                        lC = _mm_loadu_si128(lineC++);
                        lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
                        lD = _mm_loadu_si128(lineD++);
                        lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
                        lA = _mm_srli_epi16(lA,3); //13-bit unsigned
                        lB = _mm_srli_epi16(lB,3); //13-bit unsigned
                        lC = _mm_srli_epi16(lC,3); //13-bit unsigned
                        lD = _mm_srli_epi16(lD,3); //13-bit unsigned
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_slli_epi16(o128,4);
                        // Reduce both halves back to 8-bit and interleave
                        half = _mm_srli_epi16(half,8);
                        o128 = _mm_srli_epi16(o128,8);
                        o128 = _mm_packus_epi16(o128, half);
                        _mm_storeu_si128(outline128++, o128);
                    }
                    break;
                }
            }
            else
            {
                // All taps were zero-weighted: emit a blank row
                if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
                {
                    // NOTE(review): memset converts its fill value to unsigned
                    // char, so this writes solid 0x80 bytes -- NOT the intended
                    // 0x10/0x80 YUYV black pattern. Likely a latent bug; confirm
                    // the intended fill before changing.
                    memset(dst, 0x10801080, widthbytes);
                }
                else
                {
                    memset(dst, 0, widthbytes);
                }
            }
            yposf += ystepf;
            dst += pitch;
        }
        /*ptr = (unsigned char *)buffer;
        for(y=0;y<height; y++)
        {
            int r,g,b,yp = ((int)yposf);
            yposf += ystepf;
            if(yp<0 || yp>= height)
            {
                memset(dst, 0, widthbytes);
            }
            else
            {
                memcpy(dst, &ptr[widthbytes*yp], widthbytes);
            }
            dst += pitch;
        }*/
    }
}
void RGB48VerticalShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
int widthbytes, int height, int pitch, float offset,
float zoom, int xx)
{
float yposf,ystepf;
//int endofSSEline = 0;
unsigned short *scanline[4];
//int spitch = pitch/2;
int neg = 0,step;
__m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
uint8_t *lineAPos, *lineBPos, *lineCPos, *lineDPos;
uint8_t *outlinePos8;
uint16_t *outlinePos16;
offset = -offset;
//yposf = height * offset;
yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
ystepf = 1.0f/zoom;
if(yposf < 0.0)
neg = 1;
if(pitch < 0)
yposf -= ystepf;
/* yposi = floor(yposf);
remainf = yposf - (float)yposi;
tablepos = (remainf*(float)SUBPIXEL);
yposi = abs(yposi);
if(yposi==0 && tablepos == 0)
return; // no move required
*/
// -3 , 0 best small notch at zero?
//
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
step = 4;
break;
case DECODED_FORMAT_RGB24:
step = 3;
break;
case DECODED_FORMAT_YUYV:
step = 4;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
step = 8;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
step = 6;
break;
default:
assert(0);
break;
}
{
static char zeroline[1024] = {0};
int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
unsigned char *src = (unsigned char *)RGB48;
unsigned char *dst = (unsigned char *)RGB48;
unsigned char *ptr = (unsigned char *)buffer;
if(yoffset < 0) yoffset = 0;
if(yend > height) yend = height;
src += pitch * yoffset;
for(y=yoffset; y<yend; y++)
{
memcpy(ptr, src, widthbytes);
ptr += widthbytes;
src += pitch;
}
ptr = (unsigned char *)buffer;
for(y=0;y<height; y++)
{
int i,t,yp = ((int)yposf);
int rmdr = 63-((int)(yposf*64.0) & 63);
int gains[4];
yp -= 1; // use -2 cause a image down shift //DAN20100225
t = 0;
for(i=0; i<4; i++)
{
if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
{
t += gains[i] = lanczos[rmdr];
scanline[i] = (unsigned short *)zeroline;
}
else
{
t += gains[i] = lanczos[rmdr];
scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
}
yp++;
rmdr+=64;
}
if(t)
{
gA = _mm_set1_epi16(gains[0]);
gB = _mm_set1_epi16(gains[1]);
gC = _mm_set1_epi16(gains[2]);
gD = _mm_set1_epi16(gains[3]);
outlinePos8 = (uint8_t *)dst;
outlinePos16 = (uint16_t *)dst;
lineAPos = (uint8_t *)scanline[0];
lineBPos = (uint8_t *)scanline[1];
lineCPos = (uint8_t *)scanline[2];
lineDPos = (uint8_t *)scanline[3];
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16[3] = _mm_extract_epi16(o128, 3);
outlinePos16+=4;
break;
case DECODED_FORMAT_WP13:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16+=3;
break;
case DECODED_FORMAT_RG64:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16[3] = _mm_extract_epi16(o128, 3);
outlinePos16+=4;
break;
case DECODED_FORMAT_RG48:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16+=3;
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_YUYV:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=4;
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=4;
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=4;
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=4;
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_srli_epi16(o128,4);
outlinePos8[0] = _mm_extract_epi16(o128, 0);
outlinePos8[1] = _mm_extract_epi16(o128, 1);
outlinePos8[2] = _mm_extract_epi16(o128, 2);
outlinePos8[3] = _mm_extract_epi16(o128, 3);
outlinePos8+=4;
break;
case DECODED_FORMAT_RGB24:
{
int r,g,b;
b = ((lineAPos[0] * gains[0])>>7) +
((lineBPos[0] * gains[1])>>7) +
((lineCPos[0] * gains[2])>>7) +
((lineDPos[0] * gains[3])>>7); //16-bit
g = ((lineAPos[1] * gains[0])>>7) +
((lineBPos[1] * gains[1])>>7) +
((lineCPos[1] * gains[2])>>7) +
((lineDPos[1] * gains[3])>>7); //16-bit
r = ((lineAPos[2] * gains[0])>>7) +
((lineBPos[2] * gains[1])>>7) +
((lineCPos[2] * gains[2])>>7) +
((lineDPos[2] * gains[3])>>7); //16-bit
if(r<0) r = 0; if(r>65535) r = 65535;
if(g<0) g = 0; if(g>65535) g = 65535;
if(b<0) b = 0; if(b>65535) b = 65535;
lineAPos+=3;
lineBPos+=3;
lineCPos+=3;
lineDPos+=3;
outlinePos8[0] = b >> 8; //b
outlinePos8[1] = g >> 8; //g
outlinePos8[2] = r >> 8; //r
outlinePos8+=3;
/* SSE2 can't load byte alligned
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=3;
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=3;
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=3;
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=3;
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_srli_epi16(o128,4);
outlinePos8[0] = _mm_extract_epi16(o128, 0); //b
outlinePos8[1] = _mm_extract_epi16(o128, 1); //g
outlinePos8[2] = _mm_extract_epi16(o128, 2); //r
outlinePos8+=3;
*/
}
break;
}
}
else
{
if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
{
memset(dst, 0x10801080, widthbytes);
}
else
{
memset(dst, 0, widthbytes);
}
}
yposf += ystepf;
dst += pitch;
}
}
}
/*
 * RGB48VerticalShift
 *
 * Vertically shift an image in place by a fractional number of lines using a
 * four-tap subpixel filter.  Each output line is the weighted sum of four
 * consecutive source lines (taps A..D); the tap weights come from
 * gains[tablepos][0..3], where tablepos is the fractional line remainder
 * quantized to SUBPIXEL steps.  (gains[] and SUBPIXEL are defined elsewhere
 * in this file -- presumed a subpixel resampling table; confirm there.)
 *
 *   decoder    - read for StereoBufferFormat, which selects the pixel layout
 *   RGB48      - image buffer, modified in place
 *   buffer     - caller scratch; must hold four step-rounded scanlines
 *   widthbytes - payload bytes per line
 *   height     - line count
 *   pitch      - RGB48 line pitch in bytes (spitch = pitch/2 in shorts)
 *   offset     - shift as a fraction of the image height; a negative offset
 *                selects the mirrored, bottom-up walk over the lines
 *
 * A rolling four-entry window scanline[0..3] keeps the source lines still
 * needed so the output can overwrite RGB48 while it is being read.
 */
void RGB48VerticalShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
	int widthbytes, int height, int pitch, float offset)
{
	float yposf,remainf;
	int yposi,tablepos,x,y;
	int gainA,gainB,gainC,gainD;
	//int endofSSEline = 0;
	unsigned short *scanline[4], *tline;
	int spitch = pitch/2;
	int neg = 0,shift = 0,skip,step;
	int origwidthbytes = widthbytes;
	int origwidthextra;
	__m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1;
	__m128i *lineA, *lineB, *lineC, *lineD, *outline128;

	// offset = -offset;
	if(offset < 0.0)
		neg = 1;

	// Split the shift into whole lines (yposi) and a subpixel remainder
	// (tablepos indexes the filter table).
	yposf = height * offset;
	yposi = (int)floor(yposf);
	remainf = yposf - (float)yposi;
	tablepos = (int)(remainf*(float)SUBPIXEL);
	yposi = abs(yposi);

	if(yposi==0 && tablepos == 0)
		return; // no move required

	// -3 , 0 best small notch at zero?
	//
	if(neg)
	{
		// Negative shift: lines are walked bottom-up; tap weights are used
		// in table order.
		yposi -= 2;
		gainA = gains[tablepos][0];
		gainB = gains[tablepos][1];
		gainC = gains[tablepos][2];
		gainD = gains[tablepos][3];
	}
	else
	{
		// Positive shift: same table read with the taps reversed.
		yposi -= 1; //offset inherent in the table
		gainD = gains[tablepos][0];
		gainC = gains[tablepos][1];
		gainB = gains[tablepos][2];
		gainA = gains[tablepos][3];
	}

	// Broadcast each tap weight across all eight 16-bit lanes.
	gA = _mm_set1_epi16(gainA);
	gB = _mm_set1_epi16(gainB);
	gC = _mm_set1_epi16(gainC);
	gD = _mm_set1_epi16(gainD);

	// skip = shorts (or bytes for 8-bit formats) per pixel; step = source
	// bytes consumed per inner-loop iteration (one or two SSE vectors).
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32:
		skip = 4;
		step = 16;
		break;
	case DECODED_FORMAT_RGB24:
		skip = 3;
		step = 16;
		break;
	case DECODED_FORMAT_YUYV:
		skip = 2;
		step = 16;
		break;
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_RG48:
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
	default:
		skip = 6;
		step = 32;
		break;
	}

	// scanline[0] = buffer;
	// scanline[1] = buffer + width*skip/2;
	// scanline[2] = buffer + width*skip/2*2;
	// scanline[3] = buffer + width*skip/2*3;

	// Round widthbytes up to a whole number of steps; remember the partial
	// tail of the real payload for the edge stores below.
	widthbytes += (step - 1);
	widthbytes -= (widthbytes % step);
	origwidthextra = (origwidthbytes % step);

	scanline[0] = buffer;
	scanline[1] = buffer + widthbytes/2;
	scanline[2] = buffer + widthbytes/2*2;
	scanline[3] = buffer + widthbytes/2*3;

	// Prime the rolling window with the first four source lines (zero-fill
	// taps that fall outside the image).
	for(y=0; y<4; y++)
	{
		if(yposi+y >=0 && yposi+y<height)
		{
			unsigned short *ptr = RGB48;
			if(neg)
				ptr += (height-1-yposi-y)*spitch;
			else
				ptr += (yposi+y)*spitch;
			memcpy(scanline[y], ptr, origwidthbytes);
		}
		else
		{
			memset(scanline[y], 0, origwidthbytes);
		}
	}

	{
		for(y=0;y<height; y++)
		{
			unsigned short *ptr = RGB48;
			if(neg)
				ptr += (height-y-1)*spitch;
			else
				ptr += y*spitch;

			outline128 = (__m128i *)ptr;
			lineA = (__m128i *)scanline[0];
			lineB = (__m128i *)scanline[1];
			lineC = (__m128i *)scanline[2];
			lineD = (__m128i *)scanline[3];

			//for(x=0;x<width*skip/2; x+=step)
			for(x=0;x<widthbytes; x+=step)
			{
				__m128i half;

				// First vector: load one tap sample from each of the four
				// lines and normalize to 13-bit unsigned in 16-bit lanes.
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					{
						// Already 13-bit signed data; use as-is.
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						shift = 0;
					}
					break;
				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					{
						lA = _mm_loadu_si128(lineA++);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++);
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++);
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++);
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						shift = 3;
					}
					break;
				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					// 8-bit formats: expand the HIGH 8 bytes of the 16-byte
					// load into the high byte of each word (value << 8); the
					// pointers are NOT advanced here -- the second pass below
					// reuses the same load for the low half.
					lA = _mm_loadu_si128(lineA);
					lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB);
					lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC);
					lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD);
					lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					shift = 3;
					break;
				}

				// Weighted sum: mulhi gives (sample*gain)>>16 per lane, then
				// the four tap products are accumulated with saturation.
				o128 = _mm_mulhi_epi16(lA, gA);
				t1 = _mm_mulhi_epi16(lB, gB);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lC, gC);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lD, gD);
				o128 = _mm_adds_epi16(o128,t1);

				if(shift)
				{
					// Saturating clamp into [0,0x0fff] (adds_epi16 pins the
					// top, subs_epu16 pins negatives to zero), then scale
					// back up to 16-bit.
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
				}
				else
				{
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
				}

				if(skip == 6) //RGB48 || WP13
				{
					// 16-bit formats store each vector directly; the partial
					// last vector is bounced through the start of scanline[0]
					// (those bytes were already consumed this pass) so only
					// origwidthextra bytes reach the destination.
					if(widthbytes == origwidthbytes || x+16 < origwidthbytes)
						_mm_storeu_si128(outline128++, o128);
					else
					{
						//if(x < origwidthbytes+16/*bytes in an SSE2 reg*/)
						_mm_storeu_si128((__m128i *)scanline[0], o128);
						memcpy((char *)outline128, (char *)scanline[0], origwidthextra);
						outline128++;
					}
				}
				else
				{
					// 8-bit formats: park the high-half result until the low
					// half is computed, then pack both to bytes below.
					half = o128;
				}

				// Second vector: 16-bit formats advance to the next 16 bytes;
				// 8-bit formats process the LOW 8 bytes of the same load.
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					{
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						shift = 0;
					}
					break;
				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					{
						lA = _mm_loadu_si128(lineA++);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++);
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++);
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++);
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						shift = 3;
					}
					break;
				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					lA = _mm_loadu_si128(lineA++);
					lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB++);
					lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC++);
					lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD++);
					lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					shift = 3;
					break;
				}

				o128 = _mm_mulhi_epi16(lA, gA);
				t1 = _mm_mulhi_epi16(lB, gB);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lC, gC);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lD, gD);
				o128 = _mm_adds_epi16(o128,t1);

				if(shift)
				{
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
				}
				else
				{
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
				}

				if(skip != 6) //!RGB48 || !WP13
				{
					// 8-bit output: take the high byte of each 16-bit result
					// and pack low half (o128) with high half (half).
					half = _mm_srli_epi16(half,8);
					o128 = _mm_srli_epi16(o128,8);
					o128 = _mm_packus_epi16(o128, half);
				}

				if(widthbytes == origwidthbytes || x+32 < origwidthbytes)
				{
					_mm_storeu_si128(outline128++, o128);
				}
				else
				{
					//if(x+16 < origwidthbytes+16)
					// Partial tail of the second vector, bounced the same way.
					if(origwidthextra > 16)
					{
						_mm_storeu_si128((__m128i *)scanline[0], o128);
						memcpy((char *)outline128, (char *)scanline[0], origwidthextra - 16);
					}
					outline128++;
				}
			}

			// Rotate the window: the oldest line's storage becomes the slot
			// for the next source line, fetched (or zero-filled) below.
			tline = scanline[0];
			scanline[0] = scanline[1];
			scanline[1] = scanline[2];
			scanline[2] = scanline[3];
			scanline[3] = tline;

			if(yposi+y+4 >=0 && yposi+y+4<height)
			{
				unsigned short *ptr = RGB48;
				if(neg)
					ptr += (height-1-(yposi+y+4))*spitch;
				else
					ptr += (yposi+y+4)*spitch;
				memcpy(scanline[3], ptr, origwidthbytes);
			}
			else
			{
				memset(scanline[3], 0, origwidthbytes);
			}
		}
	}
}
/*
 * RGB48HoriShiftZoom
 *
 * Horizontally shift and zoom one line of a 3-component (RGB, 16-bit) image
 * in place, with an optional mirror flip, using four-tap resampling from the
 * lanczos[] table (64 subpixel phases; taps read at rmdr, rmdr+64, rmdr+128,
 * rmdr+192 -- the table is defined elsewhere in this file).
 *
 *   decoder   - read for StereoBufferFormat and cfhddata (FrameHDynamic /
 *               FrameHDynCenter / FrameHDynWidth drive a non-uniform stretch)
 *   RGB48     - one scanline, resampled in place
 *   buffer    - scratch copy of the source line
 *   width     - pixels per line; height/line locate this line for the
 *               per-line rotation term (roffset)
 *   hoffset   - horizontal shift as a fraction of the width
 *   roffset   - rotation-style offset that varies with 'line'
 *   zoom      - horizontal scale factor (source step = 1/zoom)
 *   flip      - nonzero mirrors the line before resampling
 *   frameTilt - per-eye zoom skew (eye > 0 multiplies, else divides)
 *
 * When FrameHDynamic != 1.0, the source step is varied across the line:
 * ramped from basexstepstart up to flatxstep before the "hold" zone
 * [holdstart,holdend], flat inside it, and ramped down to basexstepend
 * after it, while preserving the total span of the line.
 */
void RGB48HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
	float xposf,xstepf;
	int x;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	short *sscanline = (short *)buffer;
	int neg = 0; // NOTE(review): set below but never read in this function
	float offset = hoffset;

	// Optional mirror: swap RGB triples end-for-end in place.
	if(flip)
	{
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*3) - 3;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			ptrR -= 6; // net -3 shorts: step back one RGB pixel
		}
	}

	// Frame tilt skews the zoom in opposite directions for the two eyes.
	if(eye > 0)
	{
		zoom *= 1.0f + frameTilt;
	}
	else
	{
		zoom /= 1.0f + frameTilt;
	}

	// Starting source position: center the zoom window, apply the shift,
	// then the per-line rotation term.
	xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
	xposf -= width * roffset * 0.5f / zoom;
	xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
	if(xposf < 0.0)
		neg = 1;
	xstepf = 1.0f/zoom;

	// Copy the source line to scratch so it can be resampled in place.
	memcpy(scanline, RGB48, width*3*2);
	{
		//unsigned short zeroline[3] = {0};
		int xx = 0;
		int ixpos = (int)(xposf * 65536.0f);   // 16.16 fixed-point position
		int ixstep = (int)(xstepf * 65536.0f); // 16.16 fixed-point step
		float xbase = xposf / (float)width;
		float xstep = xstepf / (float)width;
		float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f;
		// int holdstart = width*5/10; // Use to specify a area of uniform stretch
		// int holdend = width*5/10;
		int holdstart = (int)((decoder->cfhddata.FrameHDynCenter - decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
		int holdend = (int)((decoder->cfhddata.FrameHDynCenter + decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
		float flatxstep;
		float modified_xstep_avg;
		float bottomxstep;
		float basexstepstart;
		float basexstepend;
		float range;
#if MMXSUPPORTED //TODO DANREMOVE
		__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
#endif

		// Clamp the uniform-stretch ("hold") zone to the line while keeping
		// its width.
		if(holdstart < 0) holdstart = 0, holdend = (int)((decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
		if(holdend > width) holdend = width, holdstart = (int)((1.0 - decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
		range = (float)(holdend - holdstart);
		flatxstep = xstep-z*0.5f*xstep;
		// Average step required outside the hold zone so the whole line
		// still covers the same source span.
		modified_xstep_avg = (xstep * (float)width - range * flatxstep) / ((float)width - range);
		bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);
		if(holdstart == (width-holdend))
		{
			// Hold zone is centered: both ramps start from the same step.
			basexstepstart = bottomxstep;
			basexstepend = bottomxstep;
		}
		else if(holdstart < (width-holdend))
		{
			// More room on the right: split the average between the two
			// ramps in proportion to their lengths.
			float a = (float)holdstart / (float)(width-holdend);
			float startavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
			float endavg = (modified_xstep_avg * ((float)width-range) - startavg * (float)holdstart) / (float)(width-holdend);
			basexstepstart = startavg - (flatxstep - startavg);
			basexstepend = endavg - (flatxstep - endavg);
		}
		else
		{
			float a = (float)(width-holdend) / (float)holdstart;
			float endavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
			float startavg = (modified_xstep_avg * ((float)width-range) - endavg * (float)(width-holdend)) / (float)holdstart;
			basexstepstart = startavg - (flatxstep - startavg);
			basexstepend = endavg - (flatxstep - endavg);
		}

		if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
		{
			// Signed 13-bit (WP13) path: arithmetic on sscanline (signed).
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0; // NOTE(review): local, shadows the file-scope gains[] table used elsewhere
				int xp, rmdr;
				if(z != 0.0)
				{
					// Variable step: ramp up before the hold zone, flat
					// inside it, ramp down after it.
					if(x<holdstart)
					{
						fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63); // 64-phase filter index
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4 && xx < (width-1)*3) //We need 3 values for RGB< yet we write 4, so the last pixel can't be done with MMX
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-1)*3;
					src64 = (__m64 *)&sscanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					src64 = (__m64 *)&sscanline[linepos+3];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+6];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+9];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					// Saturating clamp to non-negative, then restore scale.
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 1);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					// Scalar edge fallback: out-of-range taps fold their
					// weight into the next in-range tap via 'gains'.
					int i,r=0,g=0,b=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * sscanline[xp*3]);
							g += (gains * sscanline[xp*3+1]);
							b += (gains * sscanline[xp*3+2]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
				}
				xx+=3;
			}
		}
		else
		{
			// Unsigned 16-bit path: samples pre-shifted right 1 (to 15-bit)
			// before the signed mulhi, restored with a shift left of 2.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;
				int xp, rmdr;
				if(z != 0.0)
				{
					if(x<holdstart)
					{
						fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.
					src64 = (__m64 *)&scanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					src64 = (__m64 *)&scanline[linepos+3];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+6];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+9];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 2);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					int i,r=0,g=0,b=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * scanline[xp*3]);
							g += (gains * scanline[xp*3+1]);
							b += (gains * scanline[xp*3+2]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
				}
				xx+=3;
			}
		}
	}
#if MMXSUPPORTED //TODO DANREMOVE
	// NOTE(review): MMX registers are used above without a trailing
	// _mm_empty() -- presumably EMMS is issued once by the caller; confirm.
	//_mm_empty();
#endif
}
/* RGB48HoriShiftZoomFine removed: it was an #if 0'd (never-compiled) earlier
 * draft of RGB48HoriShiftZoom, fully superseded by the live version above --
 * the guard comment itself asked "Why is this not used?".  Recover it from
 * version control if ever needed. */
/*
 * RGBA64HoriShiftZoom
 *
 * Four-component (RGBA, 16-bit) variant of RGB48HoriShiftZoom: horizontally
 * shift/zoom one scanline in place, with an optional mirror flip, using
 * four-tap resampling from the 64-phase lanczos[] table (taps read at rmdr,
 * rmdr+64, rmdr+128, rmdr+192; the table is defined elsewhere in this file).
 * Unlike the RGB version this one uses the simple centered hold zone
 * (width*5/10) rather than FrameHDynCenter/FrameHDynWidth.
 *
 *   decoder   - read for StereoBufferFormat and cfhddata.FrameHDynamic
 *   RGB48     - one RGBA scanline (4 shorts per pixel), resampled in place
 *   buffer    - scratch copy of the source line (width*4 shorts)
 *   width     - pixels per line; height/line locate this line for roffset
 *   hoffset   - horizontal shift as a fraction of the width
 *   roffset   - rotation-style offset that varies with 'line'
 *   zoom      - horizontal scale factor (source step = 1/zoom)
 *   flip      - nonzero mirrors the line before resampling
 *   frameTilt - per-eye zoom skew (eye > 0 multiplies, else divides)
 *
 * NOTE(review): the roffset terms here omit the /zoom scaling that the RGB
 * version applies (xposf -= width*roffset*0.5f; vs ...*0.5f/zoom) -- possibly
 * intentional, left unchanged; confirm against the RGB48 path.
 */
void RGBA64HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
	float xposf,xstepf;
	int x;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	short *sscanline = (short *)buffer;
	int neg = 0; // NOTE(review): set below but never read in this function
	float offset = hoffset;

	// Optional mirror: swap RGBA quads end-for-end in place.
	if(flip)
	{
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*4) - 4;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			// BUGFIX: was "ptrR -= 4", a net movement of zero, which kept
			// re-swapping the same rightmost pixel.  After the four component
			// swaps ptrR has advanced +4, so stepping back one RGBA pixel
			// requires -8 (net -4) -- matching the -6 (net -3) used by the
			// 3-component RGB48HoriShiftZoom above.
			ptrR -= 8;
		}
	}

	// Frame tilt skews the zoom in opposite directions for the two eyes.
	if(eye > 0)
	{
		zoom *= 1.0f + frameTilt;
	}
	else
	{
		zoom /= 1.0f + frameTilt;
	}

	// Starting source position: center the zoom window, apply the shift,
	// then the per-line rotation term.
	xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
	xposf -= width * roffset * 0.5f;
	xposf += line * (width* roffset / ((float)height*zoom));
	if(xposf < 0.0)
		neg = 1;
	xstepf = 1.0f/zoom;

	// Copy the source line to scratch so it can be resampled in place.
	memcpy(scanline, RGB48, width*4*2);
	{
		//unsigned short zeroline[3] = {0};
		int xx = 0;
		int ixpos = (int)(xposf * 65536.0f);   // 16.16 fixed-point position
		int ixstep = (int)(xstepf * 65536.0f); // 16.16 fixed-point step
		float xbase = xposf / (float)width;
		float xstep = xstepf / (float)width;
		float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f;
		int holdstart = width*5/10; // Use to specify a area of uniform stretch
		int holdend = width*5/10;
		float flatxstep = xstep-z*0.5f*xstep;
		float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart));
		float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
#if MMXSUPPORTED //TODO DANREMOVE
		__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
#endif
		// Keep both step extremes non-negative so the position never runs
		// backwards.
		if(bottomxstep < 0.0)
		{
			bottomxstep = 0.0;
			flatxstep = modified_xstep_avg + modified_xstep_avg;
		}
		if(flatxstep < 0.0)
		{
			flatxstep = 0.0;
			bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
		}

		if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
		{
			// Signed 13-bit (W13A) path: arithmetic on sscanline (signed).
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0; // NOTE(review): local, shadows the file-scope gains[] table used elsewhere
				int xp, rmdr;
				if(z != 0.0)
				{
					// Variable step: ramp up before the hold zone, flat
					// inside it, ramp down after it.
					if(x<holdstart)
					{
						fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63); // 64-phase filter index
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-1)*4;
					src64 = (__m64 *)&sscanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					src64 = (__m64 *)&sscanline[linepos+4];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+8];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+12];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					// Saturating clamp to non-negative, then restore scale.
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 1);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					// Scalar edge fallback: out-of-range taps fold their
					// weight into the next in-range tap via 'gains'.
					int i,r=0,g=0,b=0,a=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * sscanline[xp*4]);
							g += (gains * sscanline[xp*4+1]);
							b += (gains * sscanline[xp*4+2]);
							a += (gains * sscanline[xp*4+3]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					a >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					if(a<0) a=0; else if(a>65535) a=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
					RGB48[xx+3] = a;
				}
				xx+=4;
			}
		}
		else
		{
			// Unsigned 16-bit path: samples pre-shifted right 1 (to 15-bit)
			// before the signed mulhi, restored with a shift left of 2.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;
				int xp, rmdr;
				if(z != 0.0)
				{
					if(x<holdstart)
					{
						fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-0)*4; //DAN20102602 -- fix left edge error.
					src64 = (__m64 *)&scanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					src64 = (__m64 *)&scanline[linepos+4];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+8];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+12];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 2);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					int i,r=0,g=0,b=0,a=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * scanline[xp*4]);
							g += (gains * scanline[xp*4+1]);
							b += (gains * scanline[xp*4+2]);
							a += (gains * scanline[xp*4+3]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					a >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					if(a<0) a=0; else if(a>65535) a=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
					RGB48[xx+3] = a;
				}
				xx+=4;
			}
		}
	}
#if MMXSUPPORTED //TODO DANREMOVE
	//_mm_empty();
#endif
}
// Blank ("window mask") one edge of a scanline for stereo/3D floating-window
// correction: zero out `line` pixels on the left (channel 0) or right
// (channel != 0) edge, feathering the boundary pixel by the fractional part
// of the mask width.  A negative windowMask swaps which channel is masked.
//
// RGB48      - start of one scanline (3 or 4 components x 16 bits per pixel)
// width      - scanline width in pixels
// channel    - 0 = mask the left edge, otherwise mask the right edge
// windowMask - fraction of the width to blank; sign selects the channel
void RGB48WindowMask(DECODER *decoder, unsigned short *RGB48, int width, int channel, float windowMask)
{
    float line = (float)width * fabsf(windowMask);  // mask width in pixels
    int pixelbytes = 6;                             // default: 3 components * 2 bytes
    float frac = (float)(line-(float)((int)line));  // sub-pixel feather amount

    // Four-component formats carry 8 bytes per pixel.
    switch(decoder->StereoBufferFormat)
    {
    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_W13A:
    case DECODED_FORMAT_RG64:
        pixelbytes = 8;
        break;
    }

    if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
        decoder->StereoBufferFormat == DECODED_FORMAT_WP13) // signed math needed
    {
        // Signed 13-bit formats: use signed shorts so the feathering
        // multiply preserves negative component values.
        short *ptrL = (short *)RGB48;
        short *ptrR = (short *)RGB48;

        if(windowMask < 0)
            channel = channel == 0 ? 1 : 0;

        if(pixelbytes == 6)
        {
            if(channel == 0)
            {
                // Zero the left edge, then feather the first surviving pixel.
                memset(ptrL, 0, 6*(int)line);
                ptrL += ((int)line*3);
                ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
                ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
                ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
            }
            else
            {
                // Zero the right edge, then feather the last surviving pixel
                // (negative indices step back from the start of the blanked run).
                ptrR += ((width-(int)line)*3);
                memset(ptrR, 0, 6*(int)line);
                ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
                ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
                ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
            }
        }
        else
        {
            // 4-component (RGBA) variant of the same left/right blanking.
            if(channel == 0)
            {
                memset(ptrL, 0, 8*(int)line);
                ptrL += ((int)line*4);
                ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
                ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
                ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
                ptrL[3] = (int)((float)ptrL[3] * (1.0-frac));
            }
            else
            {
                ptrR += ((width-(int)line)*4);
                memset(ptrR, 0, 8*(int)line);
                ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
                ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
                ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
                ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac));
            }
        }
    }
    else
    {
        // Unsigned 16-bit formats: identical structure with unsigned pointers.
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;

        if(windowMask < 0)
            channel = channel == 0 ? 1 : 0;

        if(pixelbytes == 6)
        {
            if(channel == 0)
            {
                memset(ptrL, 0, 6*(int)line);
                ptrL += ((int)line*3);
                ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
                ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
                ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
            }
            else
            {
                ptrR += ((width-(int)line)*3);
                memset(ptrR, 0, 6*(int)line);
                ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
                ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
                ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
            }
        }
        else
        {
            if(channel == 0)
            {
                memset(ptrL, 0, 8*(int)line);
                ptrL += ((int)line*4);
                ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
                ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
                ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
                ptrL[3] = (int)((float)ptrL[3] * (1.0-frac));
            }
            else
            {
                ptrR += ((width-(int)line)*4);
                memset(ptrR, 0, 8*(int)line);
                ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
                ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
                ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
                ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac));
            }
        }
    }
}
// Shift one RGB48 scanline horizontally by a sub-pixel offset (stereo
// convergence adjustment), optionally mirroring the line first.  The line is
// staged into `buffer` with zero padding at both ends, then resampled with a
// 4-tap filter (global gains[]/SUBPIXEL table) two pixels at a time in SSE2.
//
// RGB48  - scanline of `width` RGB pixels, 3 x 16-bit per pixel (in/out)
// buffer - scratch line; assumed large enough for width + xposi + 18 pixels
//          of 3 shorts each -- TODO confirm against callers
// width  - pixels per line
// offset - horizontal shift as a fraction of the line width (sign = direction)
// flip   - nonzero mirrors the scanline left<->right before shifting
void RGB48HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
    float xposf,remainf;
    int xposi,tablepos,x;
    int gainA,gainB,gainC,gainD;
    //int endofSSEline = 0;
    unsigned short *scanline = (unsigned short *)buffer;
    int neg = 0,shift = 0;
    __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
    __m128i *line128, *outline128;

    if(flip)
    {
        // Mirror the line in place, swapping whole RGB pixels.
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;
        ptrR += (width*3) - 3;
        for(x=0;x<width/2;x++)
        {
            int t1,t2,t3;
            t1 = ptrL[0];
            ptrL[0] = ptrR[0];
            ptrR[0] = t1;
            t2 = ptrL[1];
            ptrL[1] = ptrR[1];
            ptrR[1] = t2;
            t3 = ptrL[2];
            ptrL[2] = ptrR[2];
            ptrR[2] = t3;
            ptrL += 3;
            ptrR -= 3;
        }
    }

    if(offset < 0.0)
        neg = 1;

    // Split the shift into whole pixels (xposi) and a sub-pixel remainder
    // used to index the 4-tap gain table.
    xposf = width * offset;
    xposi = (int)floorf(xposf);
    remainf = xposf - (float)xposi;
    tablepos = (int)(remainf*(float)SUBPIXEL);
    xposi = abs(xposi);

    if(xposi==0 && tablepos == 0)
        return; // no move required

    gainA = gains[tablepos][0];
    gainB = gains[tablepos][1];
    gainC = gains[tablepos][2];
    gainD = gains[tablepos][3];

    if(neg == 0)
    {
        // Positive shift: xposi+2 black pixels on the left (2 extra for the
        // filter lead-in), the source line, then 16 black pixels so the SIMD
        // loop can safely over-read.
        unsigned short *ptr = scanline;
        int nwidth = width-xposi+16;
        if(nwidth > width)
            nwidth = width;
        for(x=0;x<xposi+2;x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
        }
        memcpy(ptr, RGB48, (nwidth)*3*2);
        ptr += (nwidth)*3;
        for(x=0;x<16;x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
        }
    }
    else
    {
        // Negative shift: copy starting xposi pixels into the source (with a
        // 2-pixel lead-in, zero-filled if it would fall before the line) and
        // zero-pad the right edge.
        unsigned short *ptr = scanline;
        for(x=0;x<2;x++)
        {
            if(x+xposi-2>=0)
            {
                *ptr++ = RGB48[(x+xposi-2)*3];//r
                *ptr++ = RGB48[(x+xposi-2)*3+1];//g
                *ptr++ = RGB48[(x+xposi-2)*3+2];//b
            }
            else
            {
                *ptr++ = 0;//r
                *ptr++ = 0;//g
                *ptr++ = 0;//b
            }
        }
        memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
        ptr += (width-xposi)*3;
        for(x=0;x<xposi+16;x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
        }
    }

    gA = _mm_set1_epi16(gainA);
    gB = _mm_set1_epi16(gainB);
    gC = _mm_set1_epi16(gainC);
    gD = _mm_set1_epi16(gainD);

    line128 = (__m128i *)&scanline[0];
    //outline128 = line128;
    outline128 = (__m128i *)&RGB48[0];

    //l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
    //l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
    //l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
    if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
    {
        // 13-bit signed data is used as-is.
        l1 = _mm_loadu_si128(line128++);
        l2 = _mm_loadu_si128(line128++);
        l3 = _mm_loadu_si128(line128++);
        shift = 0;
    }
    else
    {
        // 16-bit unsigned data is pre-shifted to 13-bit so the signed
        // 16-bit multiplies below cannot overflow.
        l1 = _mm_loadu_si128(line128++);
        l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
        l2 = _mm_loadu_si128(line128++);
        l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
        l3 = _mm_loadu_si128(line128++);
        l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
        shift = 3;
    }

    // 8 shorts (2 2/3 RGB pixels) per iteration; each tap reads the window
    // advanced by one pixel (3 shorts), assembled from l1/l2/l3 by byte shifts.
    for(x=0;x<width*3; x+=8)
    {
        //o=l1* gainA
        o128 = _mm_mulhi_epi16(l1, gA);

        //t1 = l1<<3*16   //t1 = r2,g2,b2,r3,g3, 0 0 0
        //t2 = l2>>16*5   //t2 = 0 0 0 0 0 b3,r4,g4
        //t1 += t2;       //t1 = r2,g2,b2,r3,g3,b3,r4,g4
        //l1 = t1         //l1 = r2,g2,b2,r3,g3,b3,r4,g4
        //t1 *= gainB
        //o += t1
        t1 = _mm_srli_si128(l1,3*2);
        t2 = _mm_slli_si128(l2,5*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gB);
        o128 = _mm_adds_epi16(o128,t1);

        //t1 = l1<<3*16   //t1 = r3,g3,b3,r4,g4 0 0 0
        //t2 = l2<<3*16;  //t2 = b4,r5,g5,b5,r6 0 0 0
        //t2 >>= 5*16;    //t2 = 0 0 0 0 0 b4,r5,g5
        //t1 += t2        //t1 = r3,g3,b3,r4,g4,b4,r5,g5
        //l1 = t1         //l1 = r3,g3,b3,r4,g4,b4,r5,g5
        //t1 *= gainC
        //o += t1
        t1 = _mm_srli_si128(l1,3*2);
        t2 = _mm_srli_si128(l2,3*2);
        t2 = _mm_slli_si128(t2,5*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gC);
        o128 = _mm_adds_epi16(o128,t1);

        //t1 = l1<<3*16   //t1 = r4,g4,b4,r5,g5 0 0 0
        //t2 = l2<<6*16   //t2 = b5,r6 0 0 0 0 0 0
        //t2 >>= 5 * 16;  //t2 = 0 0 0 0 0 b5,r6 0
        //t1 += t2        //t1 = r4,g4,b4,r5,g5,b5,r6, 0
        //t2 = l3>>7*16   //t2 = 0 0 0 0 0 0 0 g6
        //t1 += t2        //t1 = r4,g4,b4,r5,g5,b5,r6,g6
        //t1 *= gainD
        //o += t1
        t1 = _mm_srli_si128(l1,3*2);
        t2 = _mm_srli_si128(l2,6*2);
        t2 = _mm_slli_si128(t2,5*2);
        t1 = _mm_adds_epi16(t1,t2);
        t2 = _mm_slli_si128(l3,7*2);
        t1 = _mm_adds_epi16(t1,t2);
        t1 = _mm_mulhi_epi16(t1, gD);
        o128 = _mm_adds_epi16(o128,t1);

        // Advance the 3-register window and preload the next 8 shorts
        // (reads into the zero padding at the end of the line).
        l1 = l2;
        l2 = l3;
        l3 = _mm_loadu_si128(line128++);

        if(shift)
        {
            // Saturating add/subtract pair clamps to [0,0x0fff], then scale
            // back up to 16-bit.
            l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_slli_epi16(o128,4);
        }
        else
        {
            // upper limit to 32767
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_slli_epi16(o128,1);
        }

        _mm_storeu_si128(outline128++, o128);
    }
}
// Shift one RGBA64 scanline horizontally by a sub-pixel offset (stereo
// convergence adjustment), optionally mirroring the line first.  The line is
// staged into `buffer` with zero padding at both ends, then resampled with a
// 4-tap filter (global gains[]/SUBPIXEL table) two pixels at a time in SSE2.
//
// Fix: the flip loop saved ptrL[2] (blue) into t4 before swapping the alpha
// components, so a mirrored line got its left-half alpha overwritten with
// the right pixel's blue value.  t4 must capture ptrL[3].
//
// RGB48  - scanline of `width` RGBA pixels, 4 x 16-bit per pixel (in/out)
// buffer - scratch line; assumed large enough for width + xposi + 18 pixels
//          of 4 shorts each -- TODO confirm against callers
// width  - pixels per line
// offset - horizontal shift as a fraction of the line width (sign = direction)
// flip   - nonzero mirrors the scanline left<->right before shifting
void RGBA64HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
    float xposf,remainf;
    int xposi,tablepos,x;
    int gainA,gainB,gainC,gainD;
    //int endofSSEline = 0;
    unsigned short *scanline = (unsigned short *)buffer;
    int neg = 0,shift = 0;
    __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
    __m128i *line128, *outline128;

    if(flip)
    {
        // Mirror the line in place, swapping whole RGBA pixels.
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;
        ptrR += (width*4) - 4;
        for(x=0;x<width/2;x++)
        {
            int t1,t2,t3,t4;
            t1 = ptrL[0];
            ptrL[0] = ptrR[0];
            ptrR[0] = t1;
            t2 = ptrL[1];
            ptrL[1] = ptrR[1];
            ptrR[1] = t2;
            t3 = ptrL[2];
            ptrL[2] = ptrR[2];
            ptrR[2] = t3;
            t4 = ptrL[3];   // was ptrL[2]: copy/paste bug corrupted the alpha swap
            ptrL[3] = ptrR[3];
            ptrR[3] = t4;
            ptrL += 4;
            ptrR -= 4;
        }
    }

    if(offset < 0.0)
        neg = 1;

    // Split the shift into whole pixels (xposi) and a sub-pixel remainder
    // used to index the 4-tap gain table.
    xposf = width * offset;
    xposi = (int)floorf(xposf);
    remainf = xposf - (float)xposi;
    tablepos = (int)(remainf*(float)SUBPIXEL);
    xposi = abs(xposi);

    if(xposi==0 && tablepos == 0)
        return; // no move required

    gainA = gains[tablepos][0];
    gainB = gains[tablepos][1];
    gainC = gains[tablepos][2];
    gainD = gains[tablepos][3];

    if(neg == 0)
    {
        // Positive shift: xposi+2 black pixels on the left (2 extra for the
        // filter lead-in), the source line, then 16 black pixels so the SIMD
        // loop can safely over-read.
        unsigned short *ptr = scanline;
        int nwidth = width-xposi+16;
        if(nwidth > width)
            nwidth = width;
        for(x=0;x<xposi+2;x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
            *ptr++ = 0;//a
        }
        memcpy(ptr, RGB48, (nwidth)*4*2);
        ptr += (nwidth)*4;
        for(x=0;x<16;x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
            *ptr++ = 0;//a
        }
    }
    else
    {
        // Negative shift: copy starting xposi pixels into the source (with a
        // 2-pixel lead-in, zero-filled if it would fall before the line) and
        // zero-pad the right edge.
        unsigned short *ptr = scanline;
        for(x=0;x<2;x++)
        {
            if(x+xposi-2>=0)
            {
                *ptr++ = RGB48[(x+xposi-2)*4];//r
                *ptr++ = RGB48[(x+xposi-2)*4+1];//g
                *ptr++ = RGB48[(x+xposi-2)*4+2];//b
                *ptr++ = RGB48[(x+xposi-2)*4+3];//a
            }
            else
            {
                *ptr++ = 0;//r
                *ptr++ = 0;//g
                *ptr++ = 0;//b
                *ptr++ = 0;//a
            }
        }
        memcpy(ptr, &RGB48[xposi*4], (width-xposi)*4*2);
        ptr += (width-xposi)*4;
        for(x=0;x<xposi+16;x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
            *ptr++ = 0;//a
        }
    }

    gA = _mm_set1_epi16(gainA);
    gB = _mm_set1_epi16(gainB);
    gC = _mm_set1_epi16(gainC);
    gD = _mm_set1_epi16(gainD);

    line128 = (__m128i *)&scanline[0];
    //outline128 = line128;
    outline128 = (__m128i *)&RGB48[0];

    //l1 = load128;//r1,g1,b1,a1,r2,g2,b2,a2,
    //l2 = load128;//r3,g3,b3,a3,r4,g4,b4,a4,
    //l3 = load128;//r5,g5,b5,a5,r6,g6,b6,a6,
    //l4 = load128;//r7,g7,b7,a7,r8,g8,b8,a8,
    if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
    {
        // 13-bit signed data is used as-is.
        l1 = _mm_loadu_si128(line128++);
        l2 = _mm_loadu_si128(line128++);
        l3 = _mm_loadu_si128(line128++);
        shift = 0;
    }
    else
    {
        // 16-bit unsigned data is pre-shifted to 13-bit so the signed
        // 16-bit multiplies below cannot overflow.
        l1 = _mm_loadu_si128(line128++);
        l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
        l2 = _mm_loadu_si128(line128++);
        l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
        l3 = _mm_loadu_si128(line128++);
        l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
        shift = 3;
    }

    // 8 shorts (2 RGBA pixels) per iteration; each tap reads the window
    // advanced by one pixel (4 shorts), assembled from l1/l2/l3 by byte shifts.
    for(x=0;x<width*4; x+=8)
    {
        //o=l1* gainA
        o128 = _mm_mulhi_epi16(l1, gA);

        //t1 = l1<<4*16   //t1 = r2,g2,b2,a2,0, 0 0 0
        //t2 = l2>>4*16   //t2 = 0 0 0 0 r3,g3,b3,a4
        //t1 += t2;       //t1 = r2,g2,b2,a2,r3,g3,b3,a4
        //l1 = t1         //l1 = r2,g2,b2,a2,r3,g3,b3,a4
        //t1 *= gainB
        //o += t1
        t1 = _mm_srli_si128(l1,4*2);
        t2 = _mm_slli_si128(l2,4*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gB);
        o128 = _mm_adds_epi16(o128,t1);

        //t1 = l1<<4*16   //t1 = r3,g3,b3,a3, 0 0 0 0
        //t2 = l2<<4*16;  //t2 = r4,g4,b4,a4, 0 0 0 0
        //t2 >>= 4*16;    //t2 = 0 0 0 0 r4,g4,b4,a4
        //t1 += t2        //t1 = r3,g3,b3,a4,r4,g4,b4,a4
        //l1 = t1         //l1 = r3,g3,b3,a4,r4,g4,b4,a4
        //t1 *= gainC
        //o += t1
        t1 = _mm_srli_si128(l1,4*2);
        t2 = _mm_srli_si128(l2,4*2);
        t2 = _mm_slli_si128(t2,4*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gC);
        o128 = _mm_adds_epi16(o128,t1);

        //t1 = l1<<4*16   //t1 = r4,g4,b4,a4,0 0 0 0
        //t2 = l3>>4*16   //t2 = 0 0 0 0 r5,g5,b5,a5
        //t1 += t2        //t1 = r4,g4,b4,a4,r5,g5,b5,a5
        //t1 *= gainD
        //o += t1
        t1 = _mm_srli_si128(l1,4*2);
        t2 = _mm_slli_si128(l3,4*2);
        t1 = _mm_adds_epi16(t1,t2);
        t1 = _mm_mulhi_epi16(t1, gD);
        o128 = _mm_adds_epi16(o128,t1);

        // Advance the 3-register window and preload the next 8 shorts
        // (reads into the zero padding at the end of the line).
        l1 = l2;
        l2 = l3;
        l3 = _mm_loadu_si128(line128++);

        if(shift)
        {
            // Saturating add/subtract pair clamps to [0,0x0fff], then scale
            // back up to 16-bit.
            l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_slli_epi16(o128,4);
        }
        else
        {
            // upper limit to 32767
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_slli_epi16(o128,1);
        }

        _mm_storeu_si128(outline128++, o128);
    }
}
// Shift the R, G and B channels of one RGB48 scanline horizontally by
// independent sub-pixel offsets (per-channel anaglyph convergence), each
// channel optionally mirrored first.  Each channel is staged separately into
// `buffer` with zero padding, then all three are resampled together with a
// 4-tap filter whose per-lane gains rotate through the RGB interleave.
//
// Fix: the early "no move required" return tested only the red channel, so a
// zero red offset skipped nonzero green/blue shifts.  It now returns only
// when none of the three channels needs to move.
//
// RGB48   - scanline of `width` RGB pixels, 3 x 16-bit per pixel (in/out)
// buffer  - scratch line; assumed large enough for width + max offset + 18
//           pixels of 3 shorts each -- TODO confirm against callers
// offsetR/G/B - per-channel shift as a fraction of the width (sign = direction)
// flipR/G/B   - nonzero mirrors that channel left<->right before shifting
void RGB48HoriShiftAnaglyph(DECODER *decoder,  unsigned short *RGB48, unsigned short *buffer, int width,
                            float offsetR, float offsetG, float offsetB ,
                            int flipR, int flipG, int flipB)
{
    float Rxposf,Rremainf;
    int Rxposi,Rtablepos;
    float Gxposf,Gremainf;
    int Gxposi,Gtablepos;
    float Bxposf,Bremainf;
    int Bxposi,Btablepos;
    int x;
    int RgainA,RgainB,RgainC,RgainD;
    int GgainA,GgainB,GgainC,GgainD;
    int BgainA,BgainB,BgainC,BgainD;
    //int endofSSEline = 0;
    unsigned short *scanline = (unsigned short *)buffer;
    int negR = 0;
    int negG = 0;
    int negB = 0;
    int shift = 0;
    __m128i l1,l2,l3,o128,t1,t2;
    __m128i *line128, *outline128;
    __m128i gA1,gB1,gC1,gD1,gA2,gB2,gC2,gD2,gA3,gB3,gC3,gD3;

    // Mirror each requested channel in place (stride 3, single component).
    if(flipR)
    {
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;
        ptrR += (width*3) - 3;
        for(x=0;x<width/2;x++)
        {
            int t;
            t = *ptrL;
            *ptrL = *ptrR;
            *ptrR = t;
            ptrL += 3;
            ptrR -= 3;
        }
    }
    if(flipG)
    {
        unsigned short *ptrL = &RGB48[1];
        unsigned short *ptrR = &RGB48[1];
        ptrR += (width*3) - 3;
        for(x=0;x<width/2;x++)
        {
            int t;
            t = *ptrL;
            *ptrL = *ptrR;
            *ptrR = t;
            ptrL += 3;
            ptrR -= 3;
        }
    }
    if(flipB)
    {
        unsigned short *ptrL = &RGB48[2];
        unsigned short *ptrR = &RGB48[2];
        ptrR += (width*3) - 3;
        for(x=0;x<width/2;x++)
        {
            int t;
            t = *ptrL;
            *ptrL = *ptrR;
            *ptrR = t;
            ptrL += 3;
            ptrR -= 3;
        }
    }

    if(offsetR < 0.0)
        negR = 1;
    if(offsetG < 0.0)
        negG = 1;
    if(offsetB < 0.0)
        negB = 1;

    // Split each channel's shift into whole pixels and a sub-pixel
    // remainder used to index the 4-tap gain table.
    Rxposf = width * offsetR;
    Rxposi = (int)floorf(Rxposf);
    Rremainf = Rxposf - (float)Rxposi;
    Rtablepos = (int)(Rremainf*(float)SUBPIXEL);

    Gxposf = width * offsetG;
    Gxposi = (int)floorf(Gxposf);
    Gremainf = Gxposf - (float)Gxposi;
    Gtablepos = (int)(Gremainf*(float)SUBPIXEL);

    Bxposf = width * offsetB;
    Bxposi = (int)floorf(Bxposf);
    Bremainf = Bxposf - (float)Bxposi;
    Btablepos = (int)(Bremainf*(float)SUBPIXEL);

    Rxposi = abs(Rxposi);
    Gxposi = abs(Gxposi);
    Bxposi = abs(Bxposi);

    // Only skip the resample when NO channel moves (was: red only).
    if(Rxposi==0 && Rtablepos == 0 &&
       Gxposi==0 && Gtablepos == 0 &&
       Bxposi==0 && Btablepos == 0)
        return; // no move required

    RgainA = gains[Rtablepos][0];
    RgainB = gains[Rtablepos][1];
    RgainC = gains[Rtablepos][2];
    RgainD = gains[Rtablepos][3];
    GgainA = gains[Gtablepos][0];
    GgainB = gains[Gtablepos][1];
    GgainC = gains[Gtablepos][2];
    GgainD = gains[Gtablepos][3];
    BgainA = gains[Btablepos][0];
    BgainB = gains[Btablepos][1];
    BgainC = gains[Btablepos][2];
    BgainD = gains[Btablepos][3];

    // Stage the RED channel into the scratch line (left pad, data, right pad).
    if(negR == 0)
    {
        unsigned short *ptr = scanline;
        int nwidth = width-Rxposi+16;
        if(nwidth > width)
            nwidth = width;
        for(x=0;x<Rxposi+2;x++)
        {
            *ptr++ = 0;//r
            ptr++;//g
            ptr++;//b
        }
        for(x=0;x<nwidth;x++)
        {
            *ptr++ = RGB48[x*3];//r
            ptr++;//g
            ptr++;//b
        }
        for(x=0;x<16;x++)
        {
            *ptr++ = 0;//r
            ptr++;//g
            ptr++;//b
        }
    }
    else
    {
        unsigned short *ptr = scanline;
        for(x=0;x<2;x++)
        {
            if(x+Rxposi-2>=0)
            {
                *ptr++ = RGB48[(x+Rxposi-2)*3];//r
                ptr++;//g
                ptr++;//b
            }
            else
            {
                *ptr++ = 0;//r
                ptr++;//g
                ptr++;//b
            }
        }
        //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
        //ptr += (width-xposi)*3;
        for(x=Rxposi;x<width;x++)
        {
            *ptr++ = RGB48[x*3];//r
            ptr++;//g
            ptr++;//b
        }
        for(x=0;x<Rxposi+16;x++)
        {
            *ptr++ = 0;//r
            ptr++;//g
            ptr++;//b
        }
    }

    // Stage the GREEN channel (writes only the G lane of the scratch line).
    if(negG == 0)
    {
        unsigned short *ptr = scanline;
        int nwidth = width-Gxposi+16;
        if(nwidth > width)
            nwidth = width;
        for(x=0;x<Gxposi+2;x++)
        {
            ptr++;//r
            *ptr++ = 0;//g
            ptr++;//b
        }
        for(x=0;x<nwidth;x++)
        {
            ptr++;//r
            *ptr++ = RGB48[x*3+1];//g
            ptr++;//b
        }
        for(x=0;x<16;x++)
        {
            ptr++;//r
            *ptr++ = 0;//g
            ptr++;//b
        }
    }
    else
    {
        unsigned short *ptr = scanline;
        for(x=0;x<2;x++)
        {
            if(x+Gxposi-2>=0)
            {
                ptr++;//r
                *ptr++ = RGB48[(x+Gxposi-2)*3+1];//g
                ptr++;//b
            }
            else
            {
                ptr++;//r
                *ptr++ = 0;//g
                ptr++;//b
            }
        }
        //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
        //ptr += (width-xposi)*3;
        for(x=Gxposi;x<width;x++)
        {
            ptr++;//r
            *ptr++ = RGB48[x*3+1];//g
            ptr++;//b
        }
        for(x=0;x<Gxposi+16;x++)
        {
            ptr++;//r
            *ptr++ = 0;//g
            ptr++;//b
        }
    }

    // Stage the BLUE channel (writes only the B lane of the scratch line).
    if(negB == 0)
    {
        unsigned short *ptr = scanline;
        int nwidth = width-Bxposi+16;
        if(nwidth > width)
            nwidth = width;
        for(x=0;x<Bxposi+2;x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = 0;//b
        }
        for(x=0;x<nwidth;x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = RGB48[x*3+2];//b
        }
        for(x=0;x<16;x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = 0;//b
        }
    }
    else
    {
        unsigned short *ptr = scanline;
        for(x=0;x<2;x++)
        {
            if(x+Bxposi-2>=0)
            {
                ptr++;//r
                ptr++;//g
                *ptr++ = RGB48[(x+Bxposi-2)*3+2];//b
            }
            else
            {
                ptr++;//r
                ptr++;//g
                *ptr++ = 0;//b
            }
        }
        //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
        //ptr += (width-xposi)*3;
        for(x=Bxposi;x<width;x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = RGB48[x*3+2];//b
        }
        for(x=0;x<Bxposi+16;x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = 0;//b
        }
    }

    // Three phase-rotated gain vectors per tap: the RGB interleave repeats
    // every 3 x 16 shorts, so the per-lane gains cycle g*1 -> g*2 -> g*3.
    gA1 = _mm_set_epi16(RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA);
    gA2 = _mm_set_epi16(BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA);
    gA3 = _mm_set_epi16(GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA);
    gB1 = _mm_set_epi16(RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB);
    gB2 = _mm_set_epi16(BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB);
    gB3 = _mm_set_epi16(GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB);
    gC1 = _mm_set_epi16(RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC);
    gC2 = _mm_set_epi16(BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC);
    gC3 = _mm_set_epi16(GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC);
    gD1 = _mm_set_epi16(RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD);
    gD2 = _mm_set_epi16(BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD);
    gD3 = _mm_set_epi16(GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD);

    line128 = (__m128i *)&scanline[0];
    //outline128 = line128;
    outline128 = (__m128i *)&RGB48[0];

    //l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
    //l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
    //l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
    if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
    {
        // 13-bit signed data is used as-is.
        l1 = _mm_loadu_si128(line128++);
        l2 = _mm_loadu_si128(line128++);
        l3 = _mm_loadu_si128(line128++);
        shift = 0;
    }
    else
    {
        // 16-bit unsigned data is pre-shifted to 13-bit so the signed
        // 16-bit multiplies below cannot overflow.
        l1 = _mm_loadu_si128(line128++);
        l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
        l2 = _mm_loadu_si128(line128++);
        l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
        l3 = _mm_loadu_si128(line128++);
        l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
        shift = 3;
    }

    for(x=0;x<width*3; x+=8)
    {
        //o=l1* gainA
        o128 = _mm_mulhi_epi16(l1, gA1);

        //t1 = l1<<3*16   //t1 = r2,g2,b2,r3,g3, 0 0 0
        //t2 = l2>>16*5   //t2 = 0 0 0 0 0 b3,r4,g4
        //t1 += t2;       //t1 = r2,g2,b2,r3,g3,b3,r4,g4
        //l1 = t1         //l1 = r2,g2,b2,r3,g3,b3,r4,g4
        //t1 *= gainB
        //o += t1
        t1 = _mm_srli_si128(l1,3*2);
        t2 = _mm_slli_si128(l2,5*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gB1);
        o128 = _mm_adds_epi16(o128,t1);

        //t1 = l1<<3*16   //t1 = r3,g3,b3,r4,g4 0 0 0
        //t2 = l2<<3*16;  //t2 = b4,r5,g5,b5,r6 0 0 0
        //t2 >>= 5*16;    //t2 = 0 0 0 0 0 b4,r5,g5
        //t1 += t2        //t1 = r3,g3,b3,r4,g4,b4,r5,g5
        //l1 = t1         //l1 = r3,g3,b3,r4,g4,b4,r5,g5
        //t1 *= gainC
        //o += t1
        t1 = _mm_srli_si128(l1,3*2);
        t2 = _mm_srli_si128(l2,3*2);
        t2 = _mm_slli_si128(t2,5*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gC1);
        o128 = _mm_adds_epi16(o128,t1);

        //t1 = l1<<3*16   //t1 = r4,g4,b4,r5,g5 0 0 0
        //t2 = l2<<6*16   //t2 = b5,r6 0 0 0 0 0 0
        //t2 >>= 5 * 16;  //t2 = 0 0 0 0 0 b5,r6 0
        //t1 += t2        //t1 = r4,g4,b4,r5,g5,b5,r6, 0
        //t2 = l3>>7*16   //t2 = 0 0 0 0 0 0 0 g6
        //t1 += t2        //t1 = r4,g4,b4,r5,g5,b5,r6,g6
        //t1 *= gainD
        //o += t1
        t1 = _mm_srli_si128(l1,3*2);
        t2 = _mm_srli_si128(l2,6*2);
        t2 = _mm_slli_si128(t2,5*2);
        t1 = _mm_adds_epi16(t1,t2);
        t2 = _mm_slli_si128(l3,7*2);
        t1 = _mm_adds_epi16(t1,t2);
        t1 = _mm_mulhi_epi16(t1, gD1);
        o128 = _mm_adds_epi16(o128,t1);

        // Rotate the three gain phases for the next 8-lane group.
        t1 = gA1;
        gA1 = gA2;
        gA2 = gA3;
        gA3 = t1;
        t1 = gB1;
        gB1 = gB2;
        gB2 = gB3;
        gB3 = t1;
        t1 = gC1;
        gC1 = gC2;
        gC2 = gC3;
        gC3 = t1;
        t1 = gD1;
        gD1 = gD2;
        gD2 = gD3;
        gD3 = t1;

        // Advance the 3-register window and preload the next 8 shorts
        // (reads into the zero padding at the end of the line).
        l1 = l2;
        l2 = l3;
        l3 = _mm_loadu_si128(line128++);

        if(shift)
        {
            // Saturating add/subtract pair clamps to [0,0x0fff], then scale
            // back up to 16-bit.
            l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_slli_epi16(o128,4);
        }
        else
        {
            // upper limit to 32767
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_slli_epi16(o128,1);
        }

        _mm_storeu_si128(outline128++, o128);
    }
}
// Accumulate analysis-tools data (RGB histograms, per-column waveform, UV
// vectorscope) for one scanline in any supported decoded pixel format.
// Samples are decimated by `step` so at most 360 columns are analyzed; all
// components are reduced to 8-bit before being binned.
//
// sbase      - pointer to the scanline, interpreted per `format`
// width      - scanline width in pixels
// format     - decoded pixel format (low 24 bits significant)
// whitepoint - 13 remaps RG64->W13A and everything else->WP13 (13-bit data)
void HistogramLine(DECODER *decoder, unsigned short *sbase, int width, int format, int whitepoint)
{
    // ypos/upos/vpos default to the YUYV byte layout; UYVY overrides below.
    int x,val,ypos=0,upos=1,vpos=3;
    int step = 1,pos=0;
    short *ssbase = (short *)sbase;          // signed view for WP13/W13A data
    uint32_t *lbase = (uint32_t *)sbase;     // 32-bit view for packed 10-bit formats
    ToolsHandle *tools = decoder->tools;
    int scaledvectorscope = 0;               // 1 = rescale UV to full circle

    if(tools == NULL)
        return;

    if(whitepoint == 13)
    {
        if(format == DECODED_FORMAT_RG64)
            format = DECODED_FORMAT_W13A;
        else
            format = DECODED_FORMAT_WP13;
    }

    // Decimate so the waveform is at most 360 columns wide.
    while(width/step > 360)
    {
        step*=2;
    }
    tools->waveformWidth = width/step;
    decoder->tools->blurUVdone = 0;

    switch(format & 0xffffff)
    {
    case DECODED_FORMAT_WP13:
        // 13-bit signed RGB, 3 components per pixel; >>5 reduces to 8-bit.
        decoder->tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            R = ssbase[0]>>5;
            G = ssbase[1]>>5;
            B = ssbase[2]>>5;
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
            ssbase += step*3;
        }
        break;
    case DECODED_FORMAT_W13A:
        // 13-bit signed RGBA, 4 components per pixel (alpha ignored).
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            R = ssbase[0]>>5;
            G = ssbase[1]>>5;
            B = ssbase[2]>>5;
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
            ssbase += step*4;
        }
        break;
    case DECODED_FORMAT_RG48:
        // 16-bit unsigned RGB; the top byte is the 8-bit sample.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            R = sbase[0]>>8;
            G = sbase[1]>>8;
            B = sbase[2]>>8;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
            sbase += step*3;
        }
        break;
    case DECODED_FORMAT_AB10:
    case DECODED_FORMAT_RG30:
        // Packed 10-bit-per-channel little-endian; take the top 8 of each 10.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            val = lbase[x];
            R = (val>>22)&0xff;
            G = (val>>12)&0xff;
            B = (val>>02)&0xff;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case DECODED_FORMAT_AR10:
        // Same packing with R and B swapped.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            val = lbase[x];
            B = (val>>22)&0xff;
            G = (val>>12)&0xff;
            R = (val>>02)&0xff;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case DECODED_FORMAT_R210:
        // Big-endian packed 10-bit; byte-swap each word first.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            val = SwapInt32BtoN(lbase[x]);
            R = (val>>22)&0xff;
            G = (val>>12)&0xff;
            B = (val>>02)&0xff;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case DECODED_FORMAT_DPX0:
        // DPX 10-bit packing (channels start 2 bits higher than R210).
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            val = SwapInt32BtoN(lbase[x]);
            R = (val>>24)&0xff;
            G = (val>>14)&0xff;
            B = (val>>04)&0xff;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case DECODED_FORMAT_RG64:
    case DECODED_FORMAT_B64A:
        // 16-bit ARGB (component 0 is alpha); top byte of each component.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            R = sbase[1]>>8;
            G = sbase[2]>>8;
            B = sbase[3]>>8;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
            sbase += step*4;
        }
        break;
    case COLOR_FORMAT_UYVY:
        // UYVY only differs from YUYV by byte order; set offsets and share
        // the loop below.
        ypos=1,upos=0,vpos=2;
        /* fallthrough */
    case DECODED_FORMAT_CbYCrY_8bit: // CMD: 20100109
    case COLOR_FORMAT_YUYV:
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int Y,U,V,R,G,B;
            uint8_t *bptr = (uint8_t *)sbase;
            bptr += x * 2;
            // Average the two luma samples of the 4:2:2 pair.
            Y = bptr[ypos]-16;
            U = bptr[upos]-128;
            Y+= bptr[ypos+2]-16; Y>>=1;
            V = bptr[vpos]-128;
            // Rec.709-style YUV -> RGB, 13-bit fixed point.
            R = (9535*Y + 14688*V)>>13; //13-bit white
            G = (9535*Y - 4375*V - 1745*U)>>13;
            B = (9535*Y + 17326*U)>>13;
            //TODO much -20 to 120 RGB range.
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            if(scaledvectorscope)
            {
                U *= 255; U /= 314;
                V *= 255; V /= 244;
            }
            //* 255.0/314.0
            //* 255.0/244.0
            U += 128;
            V += 128;
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_YU64:
        // 16-bit 4:2:2 (Y V Y U order); use only the high byte of each word.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int Y,U,V,R,G,B;
            uint8_t *bptr = (uint8_t *)sbase;
            bptr += x * 4;
            bptr++; //read only the high byte out of the 16-bit
            Y = bptr[0]-16;
            V = bptr[2]-128;
            Y+= bptr[4]-16; Y>>=1;
            U = bptr[6]-128;
            R = (9535*Y + 14688*V)>>13; //13-bit white
            G = (9535*Y - 4375*V - 1745*U)>>13;
            B = (9535*Y + 17326*U)>>13;
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            if(scaledvectorscope)
            {
                U *= 255; U /= 314;
                V *= 255; V /= 244;
            }
            U += 128;
            V += 128;
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_V210:
        // V210 packs 6 pixels into 4 32-bit words; the sample positions of
        // Y/U/V depend on x mod 6, handled case by case below.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int Y,U,V,R,G,B;
            uint32_t *lptr = (uint32_t *)sbase;
            lptr += (x/6)*4;
            switch(x % 6)
            {
            case 0:
                V = ((*lptr>>02) & 0xff) - 128;
                Y = ((*lptr>>12) & 0xff) - 16;
                U = ((*lptr>>22) & 0xff) - 128;
                lptr++;
                Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
                break;
            case 1:
                lptr++;
                Y = ((*lptr>>02) & 0xff) - 16;
                V = ((*lptr>>12) & 0xff) - 128;
                Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
                lptr--;
                U = ((*lptr>>22) & 0xff) - 128;
                break;
            case 2:
                lptr++;
                Y = ((*lptr>>22) & 0xff) - 16;
                lptr++;
                U = ((*lptr>>02) & 0xff) - 128;
                Y+= ((*lptr>>12) & 0xff) - 16; Y>>=1;
                V = ((*lptr>>22) & 0xff) - 128;
                break;
            case 3:
                lptr++;
                V = ((*lptr>>12) & 0xff) - 128;
                lptr++;
                U = ((*lptr>>02) & 0xff) - 128;
                Y = ((*lptr>>12) & 0xff) - 16;
                lptr++;
                Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
                break;
            case 4:
                lptr+=2;
                V = ((*lptr>>22) & 0xff) - 128;
                lptr++;
                Y = ((*lptr>>02) & 0xff) - 16;
                U = ((*lptr>>12) & 0xff) - 128;
                Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
                break;
            case 5:
                lptr+=2;
                V = ((*lptr>>22) & 0xff) - 128;
                lptr++;
                U = ((*lptr>>12) & 0xff) - 128;
                Y = ((*lptr>>22) & 0xff) - 16;
                lptr++;
                Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
                break;
            }
            R = (9535*Y + 14688*V)>>13; //13-bit white
            G = (9535*Y - 4375*V - 1745*U)>>13;
            B = (9535*Y + 17326*U)>>13;
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            if(scaledvectorscope)
            {
                U *= 255; U /= 314;
                V *= 255; V /= 244;
            }
            U += 128;
            V += 128;
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_RGB24:
        // 8-bit BGR byte order.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int R,G,B,U,V;
            uint8_t *bptr = (uint8_t *)sbase;
            bptr += x * 3;
            R = bptr[2];
            G = bptr[1];
            B = bptr[0];
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_RGB32:
        // 8-bit BGRA byte order (alpha ignored).
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int R,G,B,U,V;
            uint8_t *bptr = (uint8_t *)sbase;
            bptr += x * 4;
            R = bptr[2];
            G = bptr[1];
            B = bptr[0];
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_BYR2:
    case COLOR_FORMAT_BYR4:
        //do nothing
        break;
    default:
        assert(0);
#if (0 && DEBUG)
        fprintf(stderr,"decoder.HistogramLine: Unsupported pixel format\n");
#endif
        break;
    }
}
// Stereo ghost-busting: reduces crosstalk ("ghosting") between separate
// left and right interleaved RGB48 lines, in place. Each eye has a fraction
// (ileakL/ileakR) of the opposite eye's image subtracted. The math is done
// on squared 10-bit components (an approximation of linear light); results
// are converted back through the decoder's lazily-filled square-root lookup
// table, where the value 65535 marks entries not yet computed.
// sbaseL/sbaseR: 16-bit RGB triplets for the left/right eyes (modified in place).
// ileakL/ileakR: 16-bit-range leak amounts; >>6 scales them to the 10-bit domain.
void GhostBust(DECODER *decoder, unsigned short *sbaseL, unsigned short *sbaseR, int width, int ileakL, int ileakR)
{
#if 1
	int x,RL,GL,BL,RR,GR,BR;
	int nRL,nGL,nBL;
	int nRR,nGR,nBR;
	int max = 1024*1024-1;                           // clamp ceiling of the 20-bit squared domain
	unsigned short *sqrttable = decoder->sqrttable;  // lazily-filled sqrt LUT; 65535 == "unset"
	ileakL>>=6;                                      // scale leak factors to the 10-bit pixel range
	ileakR>>=6;
	if(sqrttable == NULL)
		return;
	for(x=0;x<width;x++)
	{
		RL = sbaseL[0]>>6;
		GL = sbaseL[1]>>6; //10-bit
		BL = sbaseL[2]>>6;
		RL*=RL;
		GL*=GL; //20-bit (square to approximate linear light)
		BL*=BL;
		RR = sbaseR[0]>>6;
		GR = sbaseR[1]>>6; //10-bit
		BR = sbaseR[2]>>6;
		RR*=RR;
		GR*=GR; //20-bit
		BR*=BR;
		// Left eye: keep (1023-ileakL)/1024 of self, bias toward white by
		// ileakL*max, subtract the leak of the right eye's matching channel.
		// NOTE(review): these sums can approach 2^31 for large leak values;
		// assumes ileakL/ileakR stay small enough to avoid signed overflow — confirm.
		nRL = RL*(1023-ileakL) + ileakL*max - RR*ileakL; //30-bit
		nGL = GL*(1023-ileakL) + ileakL*max - GR*ileakL;
		nBL = BL*(1023-ileakL) + ileakL*max - BR*ileakL;
		nRL >>= 10; //20-bit
		nGL >>= 10;
		nBL >>= 10;
		if(nRL>max) nRL=max; if(nRL<0) nRL=0;
		if(nGL>max) nGL=max; if(nGL<0) nGL=0;
		if(nBL>max) nBL=max; if(nBL<0) nBL=0;
		// Fill the sqrt LUT on demand before converting back to 10-bit.
		if(sqrttable[nRL] == 65535)
			sqrttable[nRL] = (int)sqrt(nRL);
		if(sqrttable[nGL] == 65535)
			sqrttable[nGL] = (int)sqrt(nGL);
		if(sqrttable[nBL] == 65535)
			sqrttable[nBL] = (int)sqrt(nBL);
		sbaseL[0] = sqrttable[nRL]<<6;                // back to 16-bit range
		sbaseL[1] = sqrttable[nGL]<<6;
		sbaseL[2] = sqrttable[nBL]<<6;
		sbaseL += 3;
		// Right eye: symmetric computation using ileakR against the left eye.
		nRR = RR*(1023-ileakR) + ileakR*max - RL*ileakR; //30-bit
		nGR = GR*(1023-ileakR) + ileakR*max - GL*ileakR;
		nBR = BR*(1023-ileakR) + ileakR*max - BL*ileakR;
		nRR >>= 10; //20-bit
		nGR >>= 10;
		nBR >>= 10;
		if(nRR>max) nRR=max; if(nRR<0) nRR=0;
		if(nGR>max) nGR=max; if(nGR<0) nGR=0;
		if(nBR>max) nBR=max; if(nBR<0) nBR=0;
		if(sqrttable[nRR] == 65535)
			sqrttable[nRR] = (int)sqrt(nRR);
		if(sqrttable[nGR] == 65535)
			sqrttable[nGR] = (int)sqrt(nGR);
		if(sqrttable[nBR] == 65535)
			sqrttable[nBR] = (int)sqrt(nBR);
		sbaseR[0] = sqrttable[nRR]<<6;
		sbaseR[1] = sqrttable[nGR]<<6;
		sbaseR[2] = sqrttable[nBR]<<6;
		sbaseR += 3;
	}
#else // works and fast but has not image linearization, not as good
	// Disabled SSE2 path: operates directly on gamma-encoded values (no
	// squaring / sqrt), hence "not as good" per the original author.
	// NOTE(review): this branch references 'ileak', which is not a parameter
	// of this function — it would not compile if enabled.
	__m128i *ptrL = (__m128i *)sbaseL;
	__m128i *ptrR = (__m128i *)sbaseR;
	__m128i t,L,R,nL,nR;
	int x,width8 = (width*3) & ~7;
	__m128i white_epi16 = _mm_set1_epi16(32767);
	__m128i leak_epi16 = _mm_set1_epi16(ileak>>1);
	__m128i oneNegLeak_epi16 = _mm_set1_epi16(32767-(ileak>>1));
	for(x=0;x<width8;x+=8)
	{
		L = _mm_load_si128(ptrL);
		R = _mm_load_si128(ptrR);
		L = _mm_srli_epi16(L,1); //15-bit
		R = _mm_srli_epi16(R,1); //15-bit
		nL = _mm_mulhi_epi16(L, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nL = _mm_adds_epi16(nL, t);
		t = _mm_mulhi_epi16(R, leak_epi16);
		nL = _mm_subs_epu16(nL, t);
		nR = _mm_mulhi_epi16(R, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nR = _mm_adds_epi16(nR, t);
		t = _mm_mulhi_epi16(L, leak_epi16);
		nR = _mm_subs_epu16(nR, t);
		L = _mm_slli_epi16(nL,2);
		R = _mm_slli_epi16(nR,2);
		_mm_store_si128(ptrL++, L);
		_mm_store_si128(ptrR++, R);
	}
#endif
}
// Red/cyan anaglyph ghost-busting on a single interleaved RGB48 line, in
// place. Red leaks by ileakL against the cyan average (G+B)/2; green and
// blue each leak by ileakR against red. As in GhostBust(), components are
// squared into a ~linear-light 20-bit domain, the leak is removed there,
// and results return to 10-bit via the decoder's lazily-filled sqrt LUT
// (entry value 65535 == "not yet computed").
void GhostBustRC(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
#if 1
	int x,R,G,B;
	int nR,nG,nB;
	int max = 1024*1024-1;                           // clamp ceiling of the 20-bit squared domain
	unsigned short *sqrttable = decoder->sqrttable;  // lazily-filled sqrt LUT
	ileakL>>=6;                                      // scale leak factors to the 10-bit range
	ileakR>>=6;
	if(sqrttable == NULL)
		return;
	for(x=0;x<width;x++)
	{
		R = sbase[0]>>6;
		G = sbase[1]>>6; //10-bit
		B = sbase[2]>>6;
		R*=R;
		G*=G; //20-bit (approximate linear light)
		B*=B;
		// NOTE(review): these sums can approach 2^31 for large leak values;
		// assumes ileakL/ileakR stay small enough to avoid signed overflow — confirm.
		nR = R*(1023-ileakL) + ileakL*max - ((G+B)>>1)*ileakL; //30-bit
		nG = G*(1023-ileakR) + ileakR*max - R*ileakR;
		nB = B*(1023-ileakR) + ileakR*max - R*ileakR;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;
		if(nR>max) nR=max; if(nR<0) nR=0;
		if(nG>max) nG=max; if(nG<0) nG=0;
		if(nB>max) nB=max; if(nB<0) nB=0;
		// Fill the sqrt LUT on demand (65535 is the "unset" sentinel).
		if(sqrttable[nR] == 65535)
			sqrttable[nR] = (int)sqrt(nR);
		if(sqrttable[nG] == 65535)
			sqrttable[nG] = (int)sqrt(nG);
		if(sqrttable[nB] == 65535)
			sqrttable[nB] = (int)sqrt(nB);
		sbase[0] = sqrttable[nR]<<6;                 // back to 16-bit range
		sbase[1] = sqrttable[nG]<<6;
		sbase[2] = sqrttable[nB]<<6;
		sbase += 3;
	}
#elif 0
	// Disabled floating-point reference implementation of the same formula
	// (no LUT, direct sqrt); kept for comparison.
	int x;
	float R,G,B;
	float nR,nG,nB;
	float fleakL = (float)ileakL / 65535.0;
	float fleakR = (float)ileakR / 65535.0;
	for(x=0;x<width;x++)
	{
		R = sbase[0];
		G = sbase[1];
		B = sbase[2];
		R /= 65535.0;
		G /= 65535.0;
		B /= 65535.0;
		R *= R;
		G *= G;
		B *= B;
		nR = R*(1.0-fleakL) + fleakL - (G+B)*0.5*fleakL;
		nG = G*(1.0-fleakR) + fleakR - R*fleakR;
		nB = B*(1.0-fleakR) + fleakR - R*fleakR;
		if(nR<0) nR=0;
		if(nG<0) nG=0;
		if(nB<0) nB=0;
		nR = sqrt(nR);
		nG = sqrt(nG);
		nB = sqrt(nB);
		sbase[0] = nR * 65535.0;
		sbase[1] = nG * 65535.0;
		sbase[2] = nB * 65535.0;
		sbase += 3;
	}
#elif 0
	// Disabled SSE implementation of the floating-point variant, processing
	// two RGB triplets (six 16-bit values) per iteration.
	__m128i RGBRGB,rgb_epi32,RGB1,RGB2;
	__m128i zero_epi128 = _mm_setzero_si128();
	int x,width6 = (width*3) / 6 * 6;
	__m128 white_ps = _mm_set1_ps(1.0);
	__m128 mul_neg_leak_ps = _mm_set_ps(1.0 - ((float)ileakL/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakL/65536.0));
	__m128 leak_ps = _mm_set_ps((float)ileakL/65536.0, (float)ileakR/65536.0, (float)ileakR/65536.0, (float)ileakL/65536.0);
	__m128 scale_ps = _mm_set1_ps(65535.0);
	__m128 scalehalf_ps = _mm_set1_ps(32767.0);
	__m128 zero_ps = _mm_set1_ps(0.0);
	__m128 rgb_ps, alt_rgb_ps;
	__m128i sub_epi32;
	__m128 sub_ps;
	for(x=0;x<width6;x+=6) // two RGB pairs
	{
		int R,G,B;
		// First RGB triplet.
		RGBRGB = _mm_loadu_si128((__m128i *)sbase);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B;
		G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB1 = _mm_cvtps_epi32(rgb_ps);
		RGB1 = _mm_packs_epi32 (RGB1, zero_epi128);
		RGB1 = _mm_slli_si128(RGB1, 10);
		RGB1 = _mm_srli_si128(RGB1, 10);
		// Second RGB triplet.
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B;
		G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB2 = _mm_cvtps_epi32(rgb_ps);
		RGB2 = _mm_packs_epi32 (RGB2, zero_epi128);
		RGB2 = _mm_slli_si128(RGB2, 6);
		// Recombine both triplets and restore any untouched trailing lanes.
		RGB1 = _mm_adds_epi16(RGB1, RGB2);
		RGB1 = _mm_slli_epi16(RGB1, 1);
		RGB1 = _mm_slli_si128(RGB1, 4);
		RGB1 = _mm_srli_si128(RGB1, 4);
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		RGBRGB = _mm_slli_si128(RGBRGB, 12);
		RGBRGB = _mm_adds_epi16(RGB1, RGBRGB);
		_mm_storeu_si128((__m128i *)sbase, RGBRGB);
		sbase += 6;
	}
#endif
}
// Amber/blue anaglyph ghost reduction on one interleaved RGB48 line, in
// place. Red and green (the amber side) each leak by ileakL against blue;
// blue leaks by ileakR against the average of red and green. Components
// are squared into a ~linear-light 20-bit domain, the leak is subtracted
// there, and results are mapped back via the decoder's lazily-filled
// square-root lookup table (entry value 65535 means "not yet computed").
void GhostBustAB(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	unsigned short *lut = decoder->sqrttable;
	const int peak = 1024*1024 - 1;   // clamp ceiling of the squared domain
	int col;

	if(lut == NULL)
		return;

	// Scale the leak factors from the 16-bit range down to 10-bit.
	ileakL >>= 6;
	ileakR >>= 6;

	for(col = 0; col < width; col++)
	{
		int r = sbase[0] >> 6;        // 10-bit components
		int g = sbase[1] >> 6;
		int b = sbase[2] >> 6;
		int r2 = r * r;               // 20-bit (approximate linear light)
		int g2 = g * g;
		int b2 = b * b;

		// Blend toward white by the leak amount while subtracting the
		// opposing channel's contribution, then drop back to 20-bit.
		int outR = (r2*(1023 - ileakL) + ileakL*peak - b2*ileakL) >> 10;
		int outG = (g2*(1023 - ileakL) + ileakL*peak - b2*ileakL) >> 10;
		int outB = (b2*(1023 - ileakR) + ileakR*peak - ((r2 + g2) >> 1)*ileakR) >> 10;

		if(outR > peak) outR = peak; else if(outR < 0) outR = 0;
		if(outG > peak) outG = peak; else if(outG < 0) outG = 0;
		if(outB > peak) outB = peak; else if(outB < 0) outB = 0;

		// Populate the sqrt table on demand.
		if(lut[outR] == 65535) lut[outR] = (int)sqrt(outR);
		if(lut[outG] == 65535) lut[outG] = (int)sqrt(outG);
		if(lut[outB] == 65535) lut[outB] = (int)sqrt(outB);

		sbase[0] = lut[outR] << 6;    // back to 16-bit range
		sbase[1] = lut[outG] << 6;
		sbase[2] = lut[outB] << 6;
		sbase += 3;
	}
}
// Green/magenta anaglyph ghost reduction on one interleaved RGB48 line, in
// place. Red and blue (the magenta side) each leak by ileakL against green;
// green leaks by ileakR against the average of red and blue. Components are
// squared into a ~linear-light 20-bit domain, the leak is subtracted there,
// and results are mapped back via the decoder's lazily-filled square-root
// lookup table (entry value 65535 means "not yet computed").
void GhostBustGM(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	unsigned short *lut = decoder->sqrttable;
	const int peak = 1024*1024 - 1;   // clamp ceiling of the squared domain
	int col;

	if(lut == NULL)
		return;

	// Scale the leak factors from the 16-bit range down to 10-bit.
	ileakL >>= 6;
	ileakR >>= 6;

	for(col = 0; col < width; col++)
	{
		int r = sbase[0] >> 6;        // 10-bit components
		int g = sbase[1] >> 6;
		int b = sbase[2] >> 6;
		int r2 = r * r;               // 20-bit (approximate linear light)
		int g2 = g * g;
		int b2 = b * b;

		// Blend toward white by the leak amount while subtracting the
		// opposing channel's contribution, then drop back to 20-bit.
		int outR = (r2*(1023 - ileakL) + ileakL*peak - g2*ileakL) >> 10;
		int outG = (g2*(1023 - ileakR) + ileakR*peak - ((r2 + b2) >> 1)*ileakR) >> 10;
		int outB = (b2*(1023 - ileakL) + ileakL*peak - g2*ileakL) >> 10;

		if(outR > peak) outR = peak; else if(outR < 0) outR = 0;
		if(outG > peak) outG = peak; else if(outG < 0) outG = 0;
		if(outB > peak) outB = peak; else if(outB < 0) outB = 0;

		// Populate the sqrt table on demand.
		if(lut[outR] == 65535) lut[outR] = (int)sqrt(outR);
		if(lut[outG] == 65535) lut[outG] = (int)sqrt(outG);
		if(lut[outB] == 65535) lut[outB] = (int)sqrt(outB);

		sbase[0] = lut[outR] << 6;    // back to 16-bit range
		sbase[1] = lut[outG] << 6;
		sbase[2] = lut[outB] << 6;
		sbase += 3;
	}
}
void ProcessLine3D(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *source_buffer, int source_pitch, int channel_offset, int y, int blank)
{
uint16_t *scratchline,*scratchline2,*scratchline3;
uint16_t *sptr;
uint16_t *srclineA,*srclineB;
uint16_t *dstlineA,*dstlineB;
int x,y2;
int width = decoder->frame.width;
int height = decoder->frame.height;
int skip = 3;
int sskip = 3;
uint8_t *bptr1;
uint8_t *bptr2;
uint8_t *baseptr1;
uint8_t *baseptr2;
float windowMaskL = decoder->cfhddata.channel[0].FloatingWindowMaskL;
float windowMaskR = decoder->cfhddata.channel[0].FloatingWindowMaskR;
float frameTilt = decoder->cfhddata.channel[0].FrameTilt;
float horizOffset = decoder->cfhddata.channel[1].HorizontalOffset;
float horizOffsetR = decoder->cfhddata.channel[2].HorizontalOffset;
float rotOffset = decoder->cfhddata.channel[1].RotationOffset;
float rotOffsetR = decoder->cfhddata.channel[2].RotationOffset;
float horizOffsetStep = 0;
float horizOffsetStepR = 0;
int flip1=0,flip2=0;
int channel_flip = decoder->cfhddata.channel_flip;
int source_pitch1 = source_pitch;
int source_pitch2 = source_pitch;
uint8_t *outputline = output+y*pitch;
uint8_t *outputline2 = NULL;
float horizOffsetBase;
float rotOffsetBase;
float horizOffsetBaseR;
float rotOffsetBaseR;
int formatdone = 0;
float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
float zoom;
float zoomR;
float frameZoom1 = decoder->cfhddata.channel[1].FrameZoom;
float frameZoom2 = decoder->cfhddata.channel[2].FrameZoom;
float frameAutoZoom = decoder->cfhddata.channel[0].FrameAutoZoom;
float frameDiffZoom1 = decoder->cfhddata.channel[1].FrameDiffZoom;
float frameDiffZoom2 = decoder->cfhddata.channel[2].FrameDiffZoom;
float frameHDynamic = decoder->cfhddata.FrameHDynamic;
float frameHDynCenter = decoder->cfhddata.FrameHDynCenter;
float frameHDynWidth = decoder->cfhddata.FrameHDynWidth;
float frameHScale = decoder->cfhddata.FrameHScale;
int alphachannel = 0;
int whitepoint = 16;
float blursharpenL = decoder->cfhddata.channel[1].user_blur_sharpen;
float blursharpenR = decoder->cfhddata.channel[2].user_blur_sharpen;
float vignette = decoder->cfhddata.channel[0].user_vignette_start;
int flip_LR = 0;
float vig_r1;
float vig_r2;
float vig_gain;
if(blank) // blankline, no shifts required
{
windowMaskL = 0;
windowMaskR = 0;
frameTilt = 0;
horizOffset = 0;
horizOffsetR = 0;
rotOffset = 0;
rotOffsetR = 0;
frameZoom1 = 1.0;
frameZoom2 = 1.0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
frameHScale = 1.0;
frameHDynamic = 1.0;
frameHDynCenter = 0.5;
frameHDynWidth = 0.0;
}
if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
alphachannel = 1;
if(xmax == 0.0) xmax = 1.0;
if(ymax == 0.0) ymax = 1.0;
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
width *= 2;
}
if(decoder->source_channels < 2) // 2D
{
channel_flip &= 0x3;
channel_flip |= channel_flip<<2;
decoder->cfhddata.channel_flip = channel_flip;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX) ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
{
blursharpenL = 0.0;
blursharpenR = 0.0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
{
horizOffset = rotOffset = 0;
horizOffsetR = rotOffsetR = 0;
frameTilt = 0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
{
channel_flip = 0;
}
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
horizOffset += decoder->cfhddata.FrameOffsetX;
horizOffsetR -= decoder->cfhddata.FrameOffsetX;
frameZoom1 += frameHScale - 1.0f;
frameZoom2 += frameHScale - 1.0f;
if(frameHDynamic != 1.0)
{
frameZoom1 += 0.00001f;
frameZoom2 += 0.00001f;
}
if(vignette != 0.0)
{
float vig_diag = sqrtf(1.0f + ((float)decoder->frame.height / (float) decoder->frame.width) * ((float)decoder->frame.height / (float) decoder->frame.width));
vig_r1 = (vignette+1.0f);
vig_r2 = (decoder->cfhddata.channel[0].user_vignette_end+1.0f);
vig_gain = decoder->cfhddata.channel[0].user_vignette_gain;
vig_r1 *= vig_diag;
vig_r2 *= vig_diag;
}
}
else
{
frameZoom1 = 1.0f;
frameZoom2 = 1.0f;
vignette = 0;
}
zoom = frameZoom1 * frameAutoZoom * frameDiffZoom1;
if(frameDiffZoom2 != 0.0)
zoomR = frameZoom2 * frameAutoZoom / frameDiffZoom2;
else
zoomR = 0.0;
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
if(decoder->cfhddata.InvertOffset)
{
rotOffset = -rotOffset;
rotOffsetR = -rotOffsetR;
rotOffset -= decoder->cfhddata.FrameOffsetR;
rotOffsetR -= -decoder->cfhddata.FrameOffsetR;
}
else
{
rotOffset += decoder->cfhddata.FrameOffsetR;
rotOffsetR += -decoder->cfhddata.FrameOffsetR;
}
}
rotOffsetBase = rotOffset;
horizOffsetBase = horizOffset;
rotOffsetBaseR = rotOffsetR;
horizOffsetBaseR = horizOffsetR;
horizOffset -= rotOffset * 0.5f;
horizOffsetStep = rotOffset / (float)height;
horizOffsetR -= rotOffsetR * 0.5f;
horizOffsetStepR = rotOffsetR / (float)height;
horizOffset += horizOffsetStep * y;
horizOffsetR += horizOffsetStepR * y;
assert(bufferremain >= width * 8 * 2 * 2);
baseptr1 = source_buffer;
baseptr2 = source_buffer + channel_offset;
if(channel_flip & 0xf)
{
if(channel_flip & 1)
{
flip1 = 1;
}
if(channel_flip & 4)
{
flip2 = 1;
}
}
if(source_pitch1 < 0)
flip_LR = 1;
decoder->sharpen_flip = 0;
if(channel_flip & 2) //ProcessLine3D
{
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
}
else
{
baseptr1 += source_pitch1*(height-1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
}
if(channel_flip & 8)
{
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
baseptr1 += source_pitch1*(height-1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
else
{
baseptr2 += source_pitch2*(height-1);
source_pitch2 = -source_pitch2;
}
}
bptr1 = baseptr1 + y*source_pitch1;
bptr2 = baseptr2 + y*source_pitch2;
y2 = y;
if(decoder->channel_blend_type == BLEND_FREEVIEW) //FreeView
{
if(y2 < height/4)
{
blank = 1;
y2 = 0;
}
else
{
y2 -= height/4;
y2 *= 2;
if(y2 >= height-1)
{
blank = 1;
y2 = height - 2;
}
}
bptr1 = baseptr1 + y2*source_pitch1;
bptr2 = baseptr2 + y2*source_pitch2;
}
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 6 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 6*2 + width*2) /* as we pad the line */ ;
if(alphachannel)
{
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 8 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 8*2 + width*2) /* as we pad the line */ ;
}
dstlineA = sptr = scratchline;
dstlineB = scratchline3;
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RG64:
whitepoint = 16;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_W13A:
whitepoint = 13;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_WP13:
whitepoint = 13;
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RG48:
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RGB32:
skip = 4;
break;
case DECODED_FORMAT_RGB24:
skip = 3;
break;
case DECODED_FORMAT_YUYV:
skip = 2;
break;
}
if(blank)
{
if(srclineA)
memset(srclineA, 0, width*skip);
if(srclineB && decoder->channel_decodes > 1)
memset(srclineB, 0, width*skip);
}
if(blursharpenL != 0.0 || blursharpenR != 0.0)
{
if(decoder->channel_blend_type == BLEND_FREEVIEW ||
decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
decoder->channel_blend_type == BLEND_LINE_INTERLEAVED
)
{
decoder->doVerticalFilter = 0;
}
else
{
decoder->doVerticalFilter = 1;
}
}
{
switch(decoder->channel_blend_type)
{
case BLEND_FREEVIEW:
case BLEND_SIDEBYSIDE_ANAMORPHIC: //side by side
if(!blank)
{
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
dstlineA = srclineA;
sptr = dstlineA;
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
int cwidth= width/2;
if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth= width;
FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
memcpy(dstlineA+sskip*(width/2), srclineB, width/2*sskip*2);
}
else
{
int16_t *ptr;
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(!alphachannel)
{
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else
{
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
int cwidth= width/2;
if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth= width;
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
dstlineA = srclineA;
ptr = (int16_t *)srclineA;
for(x=0; x<width/2; x++)
{
*ptr++ = (ptr1[0]+ptr1[3])>>1;
*ptr++ = (ptr1[1]+ptr1[4])>>1;
*ptr++ = (ptr1[2]+ptr1[5])>>1 ;
ptr1+=sskip*2;
}
for(; x<width; x++)
{
*ptr++ = (ptr2[0]+ptr2[3])>>1;
*ptr++ = (ptr2[1]+ptr2[4])>>1;
*ptr++ = (ptr2[2]+ptr2[5])>>1;
ptr2+=sskip*2;
}
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, dstlineA, width/2, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, dstlineA, width/2, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, dstlineA, width/2, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, dstlineA, dstlineA+width*sskip/2, width/2, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2+width*sskip/2, dstlineA, width*sskip*2/2);
memcpy(dstlineA, dstlineA+width*sskip/2, width*sskip*2/2);
memcpy(dstlineA+width*sskip/2, scratchline2+width*sskip/2, width*sskip*2/2);
}
}
break;
case BLEND_STACKED_ANAMORPHIC: //stacked
case BLEND_LINE_INTERLEAVED: //fields
if((y & 1) == 1) return;
if(!blank)
{
uint16_t *ptrA1 = (uint16_t *)srclineA;
uint16_t *ptrA2 = (uint16_t *)srclineA + (source_pitch1>>1);
uint16_t *ptrB1 = (uint16_t *)srclineB;
uint16_t *ptrB2 = (uint16_t *)srclineB + (source_pitch2>>1);
FastBlendWP13((short *)ptrA1, (short *)ptrA2, (short *)ptrA1/*output*/, width*skip);
FastBlendWP13((short *)ptrB1, (short *)ptrB2, (short *)ptrB1/*output*/, width*skip);
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(decoder->doVerticalFilter == 0)
{
if(decoder->channel_blend_type==BLEND_STACKED_ANAMORPHIC) //stacked
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline2 = output+(y>>1)*pitch;
outputline = output+((y>>1)+(height/2))*pitch;
}
else
{
outputline = output+(y>>1)*pitch;
outputline2 = output+((y>>1)+(height/2))*pitch;
}
}
else //fields
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline = output+(y)*pitch;
outputline2 = output+(y+1)*pitch;
}
else
{
outputline2 = output+(y)*pitch;
outputline = output+(y+1)*pitch;
}
}
if(flip_LR/*source_pitch1 < 0*/) // flip Left and Right
{
uint8_t *tmp = outputline2;
outputline2 = outputline;
outputline = tmp;
}
}
else
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2, srclineA, width*skip);
memcpy(srclineA, srclineB, width*skip);
memcpy(srclineB, scratchline2, width*skip);
}
}
}
break;
case BLEND_ONION: //onion
case BLEND_DIFFERENCE: //difference
case BLEND_SPLITVIEW: //splitView
if(!blank)
{
//dstlineA = source_buffer;
//dstlineA += (source_pitch>>1) * y;
sptr = dstlineA = srclineA;
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
x = 0;
if(decoder->channel_blend_type == BLEND_SPLITVIEW) //split view
{
int xsplit = width * (decoder->cfhddata.split_pos_xy & 0xff) / 255;
for(x = xsplit*sskip; x<width*sskip; x++)
{
srclineA[x] = srclineB[x];
}
}
else if(decoder->channel_blend_type == BLEND_ONION) //onion
{
FastBlendWP13((short *)srclineA, (short *)srclineB, (short *)dstlineA/*output*/, width*skip);
}
else if(decoder->channel_blend_type == BLEND_DIFFERENCE) //difference
{
#if XMMOPT
int width8 = (width*sskip) & 0xfff8;
__m128i mid_epi16;
//int unaligned = ((int)sbase) & 15;
//unaligned += ((int)in_rgb8) & 15;
if(whitepoint == 13)
mid_epi16 = _mm_set1_epi16(0x0fff);
else
mid_epi16 = _mm_set1_epi16(0x1fff);
for(x=0; x<width8; x+=8)
{
__m128i rgb16A = _mm_load_si128((__m128i *)&srclineA[x]);
__m128i rgb16B = _mm_load_si128((__m128i *)&srclineB[x]);
// 0 to 0xffff
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
rgb16A = _mm_subs_epi16(rgb16B, rgb16A); // -3fff to 3fff
}
else
{
rgb16A = _mm_subs_epi16(rgb16A, rgb16B);
}
rgb16A = _mm_adds_epi16(rgb16A, mid_epi16); // -0x1fff to 0x5fff , avg 0x1fff
_mm_store_si128((__m128i *)&dstlineA[x], rgb16A);
}
#endif
for(; x<width*sskip; x++)
{
int val;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
val = (srclineB[x] - srclineA[x]) + 32768;
}
else
{
val = (srclineA[x] - srclineB[x]) + 32768;
}
if(val > 0x7fff) val = 0x7fff;
if(val < 0) val = 0;
dstlineA[x] = val;
}
}
}
break;
case BLEND_ANAGLYPH_RC:
case BLEND_ANAGLYPH_RC_BW:
case BLEND_ANAGLYPH_AB:
case BLEND_ANAGLYPH_AB_BW:
case BLEND_ANAGLYPH_GM:
case BLEND_ANAGLYPH_GM_BW:
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
uint16_t *sptr1 = scratchline2;
uint16_t *sptr2 = scratchline3;
dstlineA = (uint16_t *)bptr1;
// dstlineA += (source_pitch>>1) * y;
sptr = dstlineA;
sptr1 = srclineA = (uint16_t *)bptr1;
sptr2 = srclineB = (uint16_t *)bptr2;
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, scratchline2, scratchline, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, scratchline3, scratchline, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, scratchline2, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, scratchline3, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
uint16_t *tmp = srclineA;
srclineA = srclineB;
srclineB = tmp;
}
switch(decoder->channel_blend_type)
{
case BLEND_ANAGLYPH_RC:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_RC_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
int r,g,b;
for(x=0; x<width; x++)
{
r =(ptr1[0]*456 + ptr1[1]*500 + ptr1[2]*176 + ptr2[0]*-43 + ptr2[1]*-88 + ptr2[2]*-2 ) / 1000;
g =(ptr1[0]*-40 + ptr1[1]*-38 + ptr1[2]*-16 + ptr2[0]*378 + ptr2[1]*734 + ptr2[2]*-18 ) / 1000;
b =(ptr1[0]*-15 + ptr1[1]*-21 + ptr1[2]*-5 + ptr2[0]*-72 + ptr2[1]*-113+ ptr2[2]*1226) / 1000;
if(r<0) r=0; if(r>0x3fff) r=0x3fff;
if(g<0) g=0; if(g>0x3fff) g=0x3fff;
if(b<0) b=0; if(b>0x3fff) b=0x3fff;
sptr[0] = r;
sptr[1] = g;
sptr[2] = b;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
break;
}
}
break;
case BLEND_NONE:
default:
if(decoder->channel_decodes == 1) // only one channel
{
if(skip == 8)
{
//the data is already in the correct format
sptr = (unsigned short *)bptr1;
// shift if needed.
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(decoder->channel_current == 0)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, -horizOffset, flip1);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, horizOffsetR, flip2);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else if(skip == 6)
{
//the data is already in the correct format
dstlineA = sptr = (unsigned short *)srclineA;
// shift if needed.
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(decoder->channel_current == 0)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, horizOffsetR, flip2);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
}
if(decoder->channel_current == 0)
{
if(blursharpenL != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
}
}
else
{
if(blursharpenR != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenR, decoder->frame.resolution, skip);
}
}
}
if ((windowMaskL && decoder->channel_current == 0) || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
if(decoder->channel_current != 0) mask = xmin;
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
RGB48WindowMask(decoder, srclineA, width, 0, mask);
}
if ((windowMaskR && decoder->channel_current == 1) || (1.0f-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
if(decoder->channel_current != 1) mask = (1.0f-xmax);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineA, width, 1, windowMaskR);
RGB48WindowMask(decoder, srclineA, width, 1, mask);
}
}
else
{
outputline2 = output+(y+height)*pitch;
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
else
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
}
break;
}
}
if(!formatdone)
{
int flags = ACTIVEMETADATA_PRESATURATED;
int whitebitdepth = 16;
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
{
flags = 0;
whitebitdepth = 13;
}
if(outputline2)
{
// if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if(decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, dstlineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
else
{
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
//{
// if(alphachannel)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG64, whitebitdepth);
// else
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
//}
if(decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
}
}
/*
 * SharpenLine -- apply the optional vertical blur/sharpen filter to one row of
 * the intermediate stereo blend buffer and convert that row to the final output
 * pixel format.
 *
 * decoder        - decoder state (frame geometry, stereo buffer format, tuning).
 * buffer         - shared scratch memory; each worker thread uses its own slice
 *                  of width*skip bytes selected by thread_index.
 * bufferremain   - not referenced in this function (presumably the remaining
 *                  scratch size -- TODO confirm against callers).
 * output         - base pointer of the destination frame.
 * pitch          - destination row pitch in bytes.
 * local_output   - intermediate buffer holding blended rows (WP13/W13A etc.).
 * local_pitch    - row pitch of local_output in bytes; 0 disables the filter.
 * channel_offset - byte offset to the second channel, used by the
 *                  line-interleaved blend layout.
 * y              - row index to process.
 * thread_index   - index of the calling worker thread (scratch slice selector).
 */
void SharpenLine(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *local_output, int local_pitch, int channel_offset, int y, int thread_index)
{
	uint16_t *sbase;//*sbase2 = NULL;
	int width = decoder->frame.width;
	int height = decoder->frame.height;
	int skip = 3;
	//int flip1=0;//flip2=0;
	int channel_flip = decoder->cfhddata.channel_flip;
	//int local_pitch1 = local_pitch;
	//int local_pitch2 = local_pitch;
	uint8_t *outputline = output+y*pitch;
	//uint8_t *outputline2 = NULL;
	short *scratch;
	//int formatdone = 0;
	//float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
	//float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
	//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
	//float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
	int alphachannel = 0;
	float blursharpen = 0;
	int line_max = decoder->frame.height;
	int yy = y;

	// Per-eye vertical sharpen amount: channel[1] drives the left eye,
	// channel[2] the right eye.
	if(decoder->channel_current == 0)
		blursharpen = decoder->cfhddata.channel[1].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen
	else
		blursharpen = decoder->cfhddata.channel[2].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen

	// Sharpening is disabled when the color-matrix stage is off or at reduced
	// decode resolutions where the filter taps would not line up.
	if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX)||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
		decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
	{
		blursharpen = 0.0;
	}

	if(decoder->channel_mix_half_res == 1)
		line_max *= 2;

	if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
	{
		channel_flip = 0;
	}

	// Vertical flip: write this row at its mirrored position.
	// NOTE(review): the flipped index yy is used only for outputline; the
	// conversion calls below still receive the unflipped y -- presumably the
	// row index there only selects dither/position state. TODO confirm.
	if(decoder->sharpen_flip) //SharpenLine
	{
		//if(!(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1)) // right channel only (stored in baseptr1)
		{
			yy = (line_max - 1 - y);
			outputline = output+yy*pitch;
		}
	}

	if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
		decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
		decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
		alphachannel = 1;

	if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
	{
		width *= 2;
	}

	sbase = (uint16_t *)local_output;
	sbase += (local_pitch>>1) * y;

	// Bytes per pixel of the intermediate buffer format.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_W13A:
		skip = 8;
		break;
	case DECODED_FORMAT_WP13:
		skip = 6;
		break;
	case DECODED_FORMAT_RG48:
		skip = 6;
		break;
	case DECODED_FORMAT_RGB32:
		skip = 4;
		break;
	case DECODED_FORMAT_RGB24:
		skip = 3;
		break;
	case DECODED_FORMAT_YUYV:
		skip = 2;
		break;
	}

	// Per-thread scratch row for the filter output.
	scratch = (short*)(buffer + width * skip * thread_index);

	{
		int flags = ACTIVEMETADATA_PRESATURATED;
		int whitebitdepth = 16;

		// The vertical filter only runs on the 13-bit intermediate formats.
		if((decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A))
		{
			int use_pitch = local_pitch;
			int edgeclose = 0;
			flags = 0;
			whitebitdepth = 13;

			if(blursharpen != 0.0 && local_pitch != 0)
			{
				// Five filter-tap row pointers (two above, center, two below);
				// rows outside the frame are clamped to the center row, and
				// edgeclose flags rows near the top/bottom border.
				short *Aptr,*Bptr,*Cptr,*Dptr,*Eptr;

				switch(decoder->channel_blend_type)
				{
				case BLEND_STACKED_ANAMORPHIC:
					// Each eye occupies alternate double-pitch rows.
					sbase = (uint16_t *)local_output;
					sbase += (local_pitch>>1) * y * 2;
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 4; else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 2; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 2; else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 4; else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;

				case BLEND_LINE_INTERLEAVED:
					// Odd rows come from the previous row pair; even rows use
					// the second channel at channel_offset. Note y is modified
					// here and later passed to the conversion call.
					sbase = (uint16_t *)local_output;
					if(y & 1)
					{
						y--;
						sbase += (local_pitch>>1) * y;
					}
					else
					{
						sbase += (local_pitch>>1) * y;
						sbase += channel_offset>>1;
					}
					if(y<=8) edgeclose = 1;
					if(y>=4) Aptr = (short *)sbase - (local_pitch>>1) * 4; else Aptr = (short *)sbase;
					if(y>=2) Bptr = (short *)sbase - (local_pitch>>1) * 2; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-2) Dptr = (short *)sbase + (local_pitch>>1) * 2; else Dptr = (short *)sbase;
					if(y<height-4) Eptr = (short *)sbase + (local_pitch>>1) * 4; else Eptr = (short *)sbase;
					if(y>=height-8) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;

				default:
					// Planar/normal layout: adjacent rows at single pitch.
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 2; else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 1; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 1; else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 2; else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch;
					break;
				}

				// skip==8 is the alpha-carrying W13A path, otherwise WP13.
				if(skip == 8)
				{
					FastSharpeningBlurVW13A(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution,
						decoder->channel_blend_type);
				}
				else
				{
					FastSharpeningBlurVWP13(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution,
						decoder->channel_blend_type);
				}

				// Filtered row replaces the source for the conversion below.
				sbase = (uint16_t *)scratch;
			}
		}

		// Convert the (possibly filtered) row to the output pixel format.
		if(alphachannel)
			Convert4444LinesToOutput(decoder, width, 1, y, sbase,
				outputline, pitch, decoder->frame.format, whitebitdepth, flags);
		else
			ConvertLinesToOutput(decoder, width, 1, y, sbase,
				outputline, pitch, decoder->frame.format, whitebitdepth, flags);
	}
}
#if _GRAPHICS
/*
 * PaintFrame -- draw burn-in content (overlays, safe markers, and analysis
 * tools such as histogram/waveform/vectorscope) onto a decoded frame.
 *
 * When tools are requested (BurninFlags bit 1 plus any ComputeFlags beyond
 * bit 0), histogram data is first gathered over the frame by the worker
 * thread pool, then the requested tool graphics are rendered via the Draw*
 * API and composited with DrawScreen.
 *
 * decoder       - decoder state (burn-in flags, tools handle, thread pools).
 * output        - frame buffer to analyze and draw onto.
 * pitch         - frame row pitch in bytes.
 * output_format - pixel format of the output buffer.
 */
void PaintFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int x,y,v,width, height;
	int maxR=0,maxG=0,maxB=0;

	width = decoder->frame.width;
	height = decoder->frame.height;

	// Nothing to burn in.
	if(decoder->cfhddata.BurninFlags == 0)
		return;

	// Lazily allocate the tools handle when analysis tools are enabled.
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1) // tools
	{
		if(decoder->tools == NULL)
		{
#if _ALLOCATOR
			decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
			decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
			if(decoder->tools)
			{
				memset(decoder->tools, 0, sizeof(ToolsHandle));
			}
			else
			{
				return;
			}
		}
	}

	decoder->frame.output_format = output_format;

#if _THREADED && 1
	// Gather histogram/scope data across the frame with the worker pool.
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1 && decoder->tools) // histogram/scopes/waveform
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits;

#if _DELAY_THREAD_START
		if(decoder->tools->histogram == 0 && decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
				decoder->thread_cntrl.capabilities >> 16/*cpus*/,
				WorkerThreadProc,
				decoder);
		}
#endif
		{
			int avgR=0,avgG=0,avgB=0;

			// Post a message to the mailbox
			mailbox->output = output;

			// Subsample tall frames: read every 4th/2nd scan line to keep the
			// histogram pass cheap.
			if(height >= 1080)
			{
				mailbox->pitch = pitch*4; // only read every 4th scan line
				workunits = height/4; // only read every 4th scan line
			}
			else if(height >= 540)
			{
				mailbox->pitch = pitch*2; // only read every 2th scan line
				workunits = height/2; // only read every 2th scan line
			}
			else
			{
				mailbox->pitch = pitch; // read every scan line
				workunits = height; // read every scan line
			}

			if(decoder->tools->histogram == 0)
			{
				mailbox->jobType = JOB_TYPE_HISTOGRAM; // histogram

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}

			// Use 3x the average bin count as the display normalization peak
			// instead of the true maximum (commented out below).
			for(x=0;x<256;x++)
			{
				avgR += decoder->tools->histR[x];
				avgG += decoder->tools->histG[x];
				avgB += decoder->tools->histB[x];
				//if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
				//if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
				//if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
			}
			avgR /= 256;
			avgG /= 256;
			avgB /= 256;
			//maxR++;
			//maxG++;
			//maxB++;
			decoder->tools->maxR = avgR*3;//maxR;
			decoder->tools->maxG = avgG*3;//maxG;
			decoder->tools->maxB = avgB*3;//maxB;
		}
	}
#endif

	// Render the requested burn-in graphics and composite them onto output.
	if(decoder->cfhddata.BurninFlags && DrawOpen(decoder))
	{
		if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
		{
#if _THREADED
			//DrawInit(decoder);
			//DrawStartThreaded(decoder);
			if(decoder->draw_thread.pool.thread_count > 0)
			{
				DrawWaitThreaded(decoder);
			}
			else
#endif
			{
				DrawInit(decoder);
				DrawMetadataObjects(decoder);
			}
		}
		else
		{
			DrawInit(decoder);
		}

		if(decoder->drawSafeMarkers)
			DrawSafeMarkers(decoder);

		// Each tool is gated by its own ComputeFlags bit.
		if(decoder->cfhddata.BurninFlags & 2) // tools
		{
			if(decoder->tools)
			{
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 16)
					DrawGrid(decoder, 0/*decoder->MDPcurrent.parallax*/);
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 2)
					DrawHistogram(decoder, 0/*decoder->MDPcurrent.parallax*/);
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 4)
					DrawWaveform(decoder, 0/*decoder->MDPcurrent.parallax*/);
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 8)
					DrawVectorscope(decoder, 0/*decoder->MDPcurrent.parallax*/);
			}
		}

		DrawScreen(decoder, output, pitch, output_format);
	}

#if 0
#if _THREADED && 1
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & 2 && decoder->tools) // histogram
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits;
		int targetW, targetH;

		if(width < 256 || height < 256)
			return;

		targetW = width / 4;
		targetH = height / 8;

		mailbox->output = output;
		mailbox->pitch = pitch;
		workunits = targetW;
		mailbox->jobType = JOB_TYPE_BURNINS; // burnin

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	if(decoder->histogram == 0)
	{
		for(y=0; y<height; y+=4)
		{
			uint8_t *bptr = output;
			bptr += pitch * y;
			HistogramLine(decoder, (unsigned short *)bptr, width, output_format);
			if(decoder->histogram == 0)
				return; // don't know how to create Histogram for that format
		}
	}

	for(x=1;x<255;x++)
	{
		if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
		if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
		if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
	}
	maxR++;
	maxG++;
	maxB++;
	decoder->maxR = maxR;
	decoder->maxG = maxG;
	decoder->maxB = maxB;

	for(x=0; x<targetW; x++)
	{
		HistogramRender(decoder, output, pitch, output_format, x, targetW, targetH);
	}
#endif
#endif

	// Reset tool accumulators so the next frame starts fresh.
	if(decoder->tools)
		memset(decoder->tools, 0, sizeof(ToolsHandle));
}
#endif
// Allocates the warp mesh's coordinate cache; required before the threaded
// JOB_TYPE_WARP_CACHE pass (implemented in the warp library).
extern int geomesh_alloc_cache(void *gm);

// Degree/radian conversion helpers.
// NOTE(review): PI is assumed to be defined elsewhere in this file -- confirm.
#define DEG2RAD(d) (PI*(d)/180.0f)
#define RAD2DEG(r) (180.0f*(r)/PI)
// Compare two integer dimensions for approximate equality.
// Both values are coarsened by a right shift whose size scales with y
// (larger frames tolerate a larger absolute difference), then the coarse
// values must match exactly or differ by one.
bool approx_equal(int x, int y)
{
	int shift;

	if (y > 1080)
		shift = 6;
	else if (y > 540)
		shift = 5;
	else
		shift = 4;

	int cx = x >> shift;
	int cy = y >> shift;

	return (cx == cy) || (cx + 1 == cy) || (cx == cy + 1);
}
// Compare two floats for approximate equality within a 1% relative tolerance
// of x. Used to match aspect ratios (e.g. 4:3, 16:9) in WarpFrame.
//
// Fixes two defects in the previous form `x*0.99 < y && y < x*1.01`:
//  - x == y == 0 compared as NOT equal (0 < 0 is false);
//  - negative values never compared equal, because multiplying a negative x
//    by 0.99/1.01 inverts the intended bounds.
// For the positive inputs the existing callers pass, behavior is unchanged.
bool approx_equal_float(float x, float y)
{
	if (x == y)
		return true;            // exact match, including both zero
	return fabsf(y - x) < 0.01f * fabsf(x); // within 1% of |x|
}
#if WARPSTUFF
/*
 * WarpFrame -- apply lens-geometry correction (defish, repoint, rotate, zoom,
 * pan) to a decoded frame using a warp mesh.
 *
 * The mesh is expensive to build, so it is rebuilt only when any lens
 * parameter differs from the values cached in decoder->lastLens*. The mesh
 * resolution and source-lens model are chosen from the frame aspect ratio
 * (2:1 equirectangular, 4:3 sensor, or 16:9 crop). The warp itself runs on
 * the worker thread pool into decoder->lens_correct_buffer, which is then
 * copied back over the output frame.
 *
 * decoder       - decoder state (lens metadata, mesh, worker pool, buffers).
 * output        - frame to warp in place.
 * pitch         - frame row pitch in bytes.
 * output_format - pixel format of the frame (selects the warp-lib format).
 */
void WarpFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int width, height;
	//int maxR = 0, maxG = 0, maxB = 0;
	int status = WARPLIB_SUCCESS;
	CFHDDATA *cfhddata = &decoder->cfhddata;
	int backgroundfill = cfhddata->lensFill;
	float sensorcrop = 1.0;
	float phi, theta, rho;
	int srcLens = HERO4;

	if (!cfhddata->doMesh) return;

	// Rebuild the mesh only when a lens parameter changed since last frame.
	if (decoder->lastLensOffsetX != cfhddata->LensOffsetX ||
		decoder->lastLensOffsetY != cfhddata->LensOffsetY ||
		decoder->lastLensOffsetZ != cfhddata->LensOffsetZ ||
		decoder->lastLensOffsetR != cfhddata->LensOffsetR ||
		decoder->lastLensZoom != cfhddata->LensZoom ||
		decoder->lastLensFishFOV != cfhddata->LensFishFOV ||
		decoder->lastLensGoPro != cfhddata->lensGoPro ||
		decoder->lastLensSphere != cfhddata->lensSphere ||
		decoder->lastLensFill != cfhddata->lensFill ||
		decoder->lastLensStyleSel != cfhddata->lensStyleSel ||
		memcmp(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC)) ||
		memcmp(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST)) )
	{
		if (decoder->mesh)
			geomesh_destroy(decoder->mesh);

		width = decoder->frame.width;
		height = decoder->frame.height;

		// Select mesh density, source lens model, sensor crop, and the
		// pan/tilt/roll angle scaling from the frame aspect ratio.
		if (approx_equal(width, height * 2)) // approx. 2:1
		{
			float outputaspect = 16.0f/9.0f;
			srcLens = EQUIRECT;
			sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
			if (cfhddata->lensCustomSRC[1])
			{
				outputaspect = cfhddata->lensCustomSRC[0] / cfhddata->lensCustomSRC[1];
				if (outputaspect >= 1.0f && outputaspect <= 3.0f)
				{
					//float sourceratio = (float)width / (float)height;
					if (approx_equal_float(outputaspect, 4.0f / 3.0f))
						sensorcrop = sqrtf((float)(width*width + height*height)) / sqrtf((float)((width * 2 / 3)*(width * 2 / 3) + (height*height)));
					if (approx_equal_float(outputaspect, 16.0f / 9.0f)) // 0.88;
						sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
				}
			}
			if (width >= 2496)
				decoder->mesh = geomesh_create(199, 99);
			else if (width >= 1272)
				decoder->mesh = geomesh_create(99, 49);
			else
				decoder->mesh = geomesh_create(49, 25);
			phi = cfhddata->LensOffsetX * DEG2RAD(720.0f); // +-180deg HFOV for 2:1
			theta = cfhddata->LensOffsetY * DEG2RAD(720.0f); // +-180deg VFOV for 2:1
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}
		else if (approx_equal(width * 3, height * 4)) // approx. 4:3
		{
			srcLens = HERO4;
			sensorcrop = 1.0;
			if (width > 2880) // UHD
				decoder->mesh = geomesh_create(159, 119);
			else if (width >= 1920) //HD/2.7K
				decoder->mesh = geomesh_create(79, 59);
			else
				decoder->mesh = geomesh_create(39, 29);
			phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60deg HFOV for 16:9
			theta = cfhddata->LensOffsetY * DEG2RAD(98.0f); // +-49deg VFOV for 16:9
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}
		else //if(approx_equal(width*9,height*16)) // approx. 16:9
		{
			srcLens = HERO4;
			sensorcrop = sqrtf(1920 * 1920 + 1080 * 1080) / sqrtf(2000 * 2000 + 1500 * 1500); // 3840x2160 from 4000x3000
			if (width > 2880) // UHD
				decoder->mesh = geomesh_create(159, 119);
			else if (width >= 1920) //HD/2.7K
				decoder->mesh = geomesh_create(79, 59);
			else
				decoder->mesh = geomesh_create(39, 29);
			phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60.1deg HFOV for 16:9
			theta = cfhddata->LensOffsetY * DEG2RAD(70.0f); // +-34.75deg VFOV for 16:9
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}

		// Initialize the mesh for the matching warp-lib pixel format.
		if ((output_format & 0x7fffffff) == COLOR_FORMAT_YUYV)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_YUY2, width, height, pitch, WARPLIB_FORMAT_YUY2, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RGB32)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_32BGRA, width, height, pitch, WARPLIB_FORMAT_32BGRA, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_W13A)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_W13A, width, height, pitch, WARPLIB_FORMAT_W13A, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_WP13)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_WP13, width, height, pitch, WARPLIB_FORMAT_WP13, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RG48)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_RG48, width, height, pitch, WARPLIB_FORMAT_RG48, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_BGRA64)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_64ARGB, width, height, pitch, WARPLIB_FORMAT_64ARGB, backgroundfill);
		else
			assert(0);

		if (cfhddata->lensSphere == 1)
		{
			// Spherical source: rotate/zoom/defish in lens space, then
			// repoint from the source lens model to the selected output model.
			if (cfhddata->lensGoPro != 2) // not outputting EQUIRECT
			{
				if (cfhddata->LensOffsetR != 0.0)
				{
					//float angle = 360.0 * asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
					float angle = 360.0f * cfhddata->LensOffsetR * cfhddata->LensOffsetR * 2.1f;//asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
					if (cfhddata->LensOffsetR < 0.0) angle = -angle;
					geomesh_transform_rotate(decoder->mesh, angle);
				}
				if (cfhddata->LensZoom != 1.0)
					geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

				if (cfhddata->LensFishFOV != 0.0) // DeFish
				{
					float fov = cfhddata->LensFishFOV;// *180.0;
					if (fov > 89.9f) fov = 89.9f;
					if (fov < -89.9f) fov = -89.9f;
					if (fov)
						status |= geomesh_transform_defish(decoder->mesh, fov);
				}
			}

			switch (cfhddata->lensGoPro)
			{
			case 0: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, RECTILINEAR); break;
			case 1: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, HERO4); break;
			case 2: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, EQUIRECT); break;
			case 4:
				geomesh_set_custom_lens(decoder->mesh, cfhddata->lensCustomSRC, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
				if (srcLens == EQUIRECT)	geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, EQUIRECT, CUSTOM_LENS);
				else						geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, CUSTOM_LENS, CUSTOM_LENS);
				break;
			}
		}
		else // old boring geometry
		{
			// Planar (non-spherical) source: plain zoom/pan/rotate transforms.
			if (cfhddata->LensZoom != 1.0)
				geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

			// basic orthographic moves
			if (cfhddata->LensOffsetX != 0.0 || cfhddata->LensOffsetY != 0.0)
				geomesh_transform_pan(decoder->mesh, cfhddata->LensOffsetX*(float)width, -cfhddata->LensOffsetY*(float)height);
			if (cfhddata->LensOffsetR != 0.0)
			{
				float angle = 360.0f * asinf(cfhddata->LensOffsetR * 1.7777777777f) / (2.0f * 3.14159f);
				geomesh_transform_rotate(decoder->mesh, angle);
			}

			if (cfhddata->lensGoPro == 0) //Rectilear
				status |= geomesh_transform_gopro_to_rectilinear(decoder->mesh, sensorcrop);

			//status |= geomesh_fisheye_gopro_adjustmesh(mesh, &correction_mode, WARPLIB_ALGORITHM_PRESERVE_EVERYTHING,//WARPLIB_ALGORITHM_BEST_FIT,
			//	width, height, product, model, lens_type, fov, (int)decoder->frame.resolution);
		}

		geomesh_alloc_cache(decoder->mesh); // required for JOB_TYPE_WARP_CACHE

		// Lazily allocate the destination buffer for the warped frame.
		if (status == WARPLIB_SUCCESS)
		{
			if (decoder->lens_correct_buffer == NULL)
			{
#if _ALLOCATOR
				decoder->lens_correct_buffer = (int *)Alloc(decoder->allocator, pitch * height);
#else
				decoder->lens_correct_buffer = (int *)MEMORY_ALLOC(pitch * height);
#endif
			}
		}
		else
		{
			return;
		}

		/* need resources?
		{
			if(decoder->tools == NULL)
			{
#if _ALLOCATOR
				decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
				decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
				if(decoder->tools)
				{
					memset(decoder->tools, 0, sizeof(ToolsHandle));
				}
				else
				{
					return;
				}
			}
		}
		*/

#if _THREADED && 1
		// Precompute the mesh coordinate cache in parallel (JOB_TYPE_WARP_CACHE).
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
			int workunits = decoder->frame.height;

#if _DELAY_THREAD_START
			if (decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
					decoder->thread_cntrl.capabilities >> 16,
					WorkerThreadProc,
					decoder);
			}
#endif
			{
				// Post a message to the mailbox
				mailbox->data = decoder->mesh;
				mailbox->output = output;
				mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
				mailbox->line_max = decoder->frame.height;
				mailbox->chunk_size = 16;
				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
				mailbox->jobType = JOB_TYPE_WARP_CACHE;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}
		}
#endif
		//decoder->frame.output_format = output_format;

		// Remember the parameters the current mesh was built from.
		decoder->lastLensOffsetX = cfhddata->LensOffsetX;
		decoder->lastLensOffsetY = cfhddata->LensOffsetY;
		decoder->lastLensOffsetZ = cfhddata->LensOffsetZ;
		decoder->lastLensOffsetR = cfhddata->LensOffsetR;
		decoder->lastLensZoom = cfhddata->LensZoom;
		decoder->lastLensFishFOV = cfhddata->LensFishFOV;
		decoder->lastLensGoPro = cfhddata->lensGoPro;
		decoder->lastLensSphere = cfhddata->lensSphere;
		decoder->lastLensFill = cfhddata->lensFill;
		decoder->lastLensStyleSel = cfhddata->lensStyleSel;
		memcpy(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC));
		memcpy(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
	}

#if _THREADED && 1
	// Apply the warp in parallel: output -> lens_correct_buffer, then
	// optionally blur the background-filled regions vertically.
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits = decoder->frame.height;

		mailbox->data = decoder->mesh;
		mailbox->output = output;
		mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
		mailbox->line_max = decoder->frame.height;
		mailbox->chunk_size = 16;
		workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
		mailbox->jobType = JOB_TYPE_WARP;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

		if(backgroundfill) // may need to blur the filled in areas
		{
			mailbox->data = decoder->mesh;
			mailbox->output = (uint8_t *)decoder->lens_correct_buffer;
			mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
			mailbox->line_max = decoder->frame.width;
			mailbox->chunk_size = 16;
			mailbox->pitch = pitch;
			workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
			mailbox->jobType = JOB_TYPE_WARP_BLURV;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
	}
#else // not threading
	{
		//geomesh_cache_init_bilinear(decoder->mesh); //bad
		geomesh_cache_init_bilinear_range(decoder->mesh, 0, decoder->frame.height); //good
		geomesh_apply_bilinear(decoder->mesh, (unsigned char *)output, (unsigned char *)decoder->lens_correct_buffer, 0, decoder->frame.height);
	}
#endif

	// Copy the warped result back into the caller's frame buffer.
	memcpy(output, decoder->lens_correct_buffer, pitch * decoder->frame.height);

	/*
	if(lens_correct_buffer)
#if _ALLOCATOR
		Free(decoder->allocator, lens_correct_buffer);
#else
		MEMORY_ALIGNED_FREE(lens_correct_buffer);
#endif
	geomesh_destroy(mesh);
	*/
}
/*
 * MaskFrame -- blank out the parts of a decoded frame that fall outside the
 * lens crop rectangle described by cfhddata (LensXmin/LensXmax/LensYmin/
 * LensYmax, expressed as fractions of the frame height/pitch).
 *
 * RGB outputs are zero-filled.  Packed 8-bit YUV outputs are filled with the
 * format's black-level byte pair (luma/chroma ordering depends on the pixel
 * layout).
 *
 * NOTE(review): COLOR_FORMAT_YU64 sets bitsize = 16 and computes 16-bit fill
 * values, but the fill pass below only runs when bitsize == 8, so YU64
 * frames are never masked -- confirm whether that is intentional.
 */
void MaskFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int x, y, height;
	int minY, maxY;
	int minX, maxX;
	CFHDDATA *cfhddata = &decoder->cfhddata;
	uint8_t *line = output;
	uint32_t fillA = 0;		// first byte of the two-byte fill pattern
	uint32_t fillB = 0;		// second byte of the two-byte fill pattern
	int bitsize = 8;		// bits per component of the output format

	if (!cfhddata->doMesh) return;

	height = decoder->frame.height;

	// An all-zero rectangle or a full-frame rectangle means "no mask".
	if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 0.0 && decoder->cfhddata.LensXmax == 0.0) return;
	if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 1.0 && decoder->cfhddata.LensXmax == 1.0) return;

	minY = (int)(decoder->cfhddata.LensYmin*(float)height);
	maxY = (int)(decoder->cfhddata.LensYmax*(float)height);

	// Horizontal limits are byte offsets (fractions of the pitch) rounded
	// down to a four-byte boundary so the fills stay pixel-pair aligned.
	minX = 0xfffc & (int)(decoder->cfhddata.LensXmin*(float)pitch);
	maxX = 0xfffc & (int)(decoder->cfhddata.LensXmax*(float)pitch);

	if (FORMATRGB(output_format))
	{
		line = output;
		// Top rows
		for (y = 0; y < minY; y++)
		{
			memset(line, 0, abs(pitch));
			line += pitch;
		}
		// Left and right edges of middle rows
		if (maxX - minX != pitch)
		{
			for (; y < maxY; y++)
			{
				memset(line, 0, minX);
				memset(line + maxX, 0, pitch - maxX);
				line += pitch;
			}
		}
		// Bottom rows
		y = maxY;
		line = output + y*pitch;
		for (; y < height; y++)
		{
			memset(line, 0, abs(pitch));
			line += pitch;
		}

		// Fix: RGB masking is complete.  Previously control fell through
		// into the bitsize == 8 fill pass below, which re-wrote the same
		// regions a second time with fillA == fillB == 0 (identical bytes,
		// wasted work).
		return;
	}

	// Pick the black-level byte pair for the packed YUV formats.
	switch (output_format & 0x7fffffff)
	{
	case COLOR_FORMAT_YVYU:
	case COLOR_FORMAT_YUYV:
		fillA = 0x10;	// luma black
		fillB = 0x80;	// chroma neutral
		break;
	case COLOR_FORMAT_UYVY:
	case COLOR_FORMAT_2VUY:
		fillA = 0x80;	// chroma neutral
		fillB = 0x10;	// luma black
		break;
	case COLOR_FORMAT_YU64:
		fillA = 0x8000;
		fillB = 0x1000;
		bitsize = 16;	// see NOTE(review) above: no 16-bit fill pass exists
		break;
	}

	if (bitsize == 8)
	{
		line = output;
		// Top rows
		for (y = 0; y < minY; y++)
		{
			for (x = 0; x < pitch; x += 2)
			{
				line[x] = fillA;
				line[x + 1] = fillB;
			}
			line += pitch;
		}
		// Left and right edges of middle rows
		if (maxX - minX != pitch)
		{
			for (; y < maxY; y++)
			{
				for (x = 0; x < minX; x += 2)
				{
					line[x] = fillA;
					line[x + 1] = fillB;
				}
				for (x = maxX; x < pitch; x += 2)
				{
					line[x] = fillA;
					line[x + 1] = fillB;
				}
				line += pitch;
			}
		}
		// Bottom rows
		y = maxY;
		line = output + y*pitch;
		for (; y < height; y++)
		{
			for (x = 0; x < pitch; x += 2)
			{
				line[x] = fillA;
				line[x + 1] = fillB;
			}
			line += pitch;
		}
	}
}
#endif //#if WARPSTUFF
// Convert the decoder's local working buffer into the caller's output buffer,
// applying 3D (stereo), orientation, framing and flip processing on the way.
// In the threaded build each pass is described in the worker-thread mailbox
// and executed by the thread pool (vertical 3D pass, horizontal 3D pass,
// optional sharpen pass); in the non-threaded build the rows are processed
// inline on the calling thread.
//
// Parameters:
//   output         - destination frame buffer
//   pitch          - destination row pitch in bytes (may be negative)
//   output_format  - destination pixel format
//   local_output   - source (intermediate) buffer
//   local_pitch    - source row pitch in bytes
//   channel_offset - byte offset between the two stereo channels in the
//                    source buffer (negative means the channels are swapped)
void ConvertLocalToOutput(DECODER *decoder, uint8_t *output, int pitch, int output_format, uint8_t *local_output, int local_pitch, int channel_offset)
{
uint8_t *local_output_double = local_output;
if(decoder->StereoBuffer)
local_output_double = local_output = (uint8_t *)decoder->StereoBuffer;
if(channel_offset < 0) // channel swapped
{
channel_offset = -channel_offset;
}
// If the source and destination differ in vertical orientation, walk the
// source bottom-up by starting at the last row and negating its pitch.
if(INVERTEDFORMAT(decoder->frame.format) != INVERTEDFORMAT(output_format))
{
local_output += local_pitch*(decoder->frame.height-1);
if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
local_output_double += local_pitch*(decoder->frame.height*decoder->channel_decodes-1);
else
local_output_double = local_output;
local_pitch = -local_pitch;
}
if(FLIPCOLORS(output_format) || output_format & 0x80000000)
{
decoder->cfhddata.InvertOffset = 1;
}
else
{
decoder->cfhddata.InvertOffset = 0;
}
decoder->frame.format = output_format;
//decoder->frame.colorspace = COLOR_SPACE_CG_601;
#if _THREADED
{
// Threaded path: each pass is posted to the worker pool via the mailbox.
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
int workunits;
#if _DELAY_THREAD_START
// Lazily create the worker thread pool on first use.
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Decide whether a vertical pass is needed at all: any rotation, keystone,
// vertical offset, tilt, or per-eye zoom difference (orientation), or any
// framing offset/scale/zoom, requires per-column vertical shifts first.
if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[1].FrameTilt ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset ||
decoder->cfhddata.channel[2].FrameTilt))
||
((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
(decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
// decoder->cfhddata.FrameOffsetX || ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0) ))
{
//int x;
int xbytes, xstep;
//uint8_t *base = local_output;
int width, height, chunk_size;
int fine_vertical = 0;
width = decoder->frame.width;
height = decoder->frame.height;
// Row length in bytes and horizontal step size for the vertical pass,
// chosen per pixel format.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
xbytes = width*4;
xstep = 16;
break;
case DECODED_FORMAT_RGB24:
xbytes = width*3;
xstep = 16;
break;
case DECODED_FORMAT_YUYV:
xbytes = width*2;
xstep = 16;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
xbytes = width*8;
xstep = 32;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
xbytes = width*6;
xstep = 32;
break;
default:
assert(0);
break;
}
// Coarse chunks when there is no rotation/keystone; otherwise one line
// per chunk, and a per-pixel step (plus fine vertical interpolation)
// when the rotation terms are large enough to be visible.
if(!(decoder->cfhddata.process_path_flags & (PROCESSING_ORIENTATION|PROCESSING_FRAMING)) ||
(decoder->cfhddata.channel[1].RotationOffset == 0.0 && decoder->cfhddata.channel[1].FrameKeyStone == 0.0 &&
decoder->cfhddata.channel[2].RotationOffset == 0.0 && decoder->cfhddata.channel[2].FrameKeyStone == 0.0 &&
decoder->cfhddata.FrameOffsetR == 0.0))
{
chunk_size = 8;
}
else
{
chunk_size = 1;
if((fabs(decoder->cfhddata.channel[1].RotationOffset) +
fabs(decoder->cfhddata.channel[1].FrameKeyStone*0.2) +
fabs(decoder->cfhddata.FrameOffsetR)) > 0.015 ||
(fabs(decoder->cfhddata.channel[2].RotationOffset) +
fabs(decoder->cfhddata.channel[2].FrameKeyStone*0.2) +
fabs(decoder->cfhddata.FrameOffsetR)) > 0.015)
{
// Step one pixel at a time (bytes per pixel for the format).
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
xstep = 4;
break;
case DECODED_FORMAT_RGB24:
xstep = 3;
break;
case DECODED_FORMAT_YUYV:
xstep = 4;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
xstep = 8;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
default:
xstep = 6;
break;
}
fine_vertical = 1;
}
}
// Interlaced YUV 4:2:2 sources are processed one field at a time
// (double pitch, half height) so the vertical pass never mixes fields.
if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
(decoder->frame.resolution == DECODED_RESOLUTION_FULL ||
decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) &&
decoder->codec.progressive == false)
{
int interlaced_pitch = local_pitch * 2;
uint8_t *field2_output = local_output + local_pitch;
// Post a message to the mailbox for the first field
mailbox->local_output = local_output;
mailbox->local_pitch = interlaced_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->info.height >>= 1;
mailbox->line_max = (xbytes + xstep-1)/xstep;
mailbox->chunk_size = chunk_size;
mailbox->fine_vertical = fine_vertical;
mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
// Post a message to the mailbox for the second field
mailbox->local_output = field2_output;
mailbox->local_pitch = interlaced_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->info.height >>= 1;
mailbox->chunk_size = chunk_size;
mailbox->line_max = (xbytes + xstep-1)/xstep;
mailbox->fine_vertical = fine_vertical;
mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
else
{
//TODO Lens corect here.
//call JOB_TYPE_VERTICAL_3D then (or lens correction equivalent.)
// JOB_TYPE_HORIZONTAL_3D
//before doing any offset and rotation corrections.
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK //DAN20110129
width /= 2;
// Post a message to the mailbox (single progressive pass)
mailbox->local_output = local_output;
mailbox->local_pitch = local_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->chunk_size = chunk_size;
mailbox->line_max = (xbytes + xstep-1)/xstep;
mailbox->fine_vertical = fine_vertical;
mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
}
// Horizontal 3D pass: blends/packs the channel(s) into the caller's output
// buffer and applies horizontal and vertical flips.
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->local_output = local_output;
mailbox->local_pitch = local_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->chunk_size = 16;
mailbox->line_max = decoder->frame.height;
if(decoder->channel_mix_half_res == 1)
mailbox->line_max *= 2;
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
decoder->doVerticalFilter = 0;
mailbox->jobType = JOB_TYPE_HORIZONAL_3D; // 3d work && horizontal and vertical flips
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
// The horizontal pass may request an extra sharpen/vertical-filter pass.
if(decoder->doVerticalFilter)
{
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->local_output = local_output_double;
mailbox->local_pitch = local_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->chunk_size = 16;
mailbox->line_max = decoder->frame.height;
if(decoder->channel_decodes == 2 && decoder->channel_blend_type == 0)
mailbox->line_max *= 2;
if(decoder->channel_mix_half_res == 1)
mailbox->line_max *= 2;
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
mailbox->jobType = JOB_TYPE_SHARPEN; // 3d work && horizontal and vertical flips
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
}
#else
{
// Non-threaded path: process everything inline on this thread.
int y,width, height;
uint8_t scratch[4096*16];
int scratchremain = 4096*16;
int ymin = 0, ymax;
Frame_Region emptyFrameMask = {0}; // fix: this declaration was commented out at the top of the function while still referenced below, breaking the non-threaded build
width = decoder->frame.width;
height = decoder->frame.height;
ymax = height;
if((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32))
{
ymin = (float)height * decoder->cfhddata.channel[0].FrameMask.topLftY;
ymax = (float)height * decoder->cfhddata.channel[0].FrameMask.botLftY;
}
// Same "is a vertical pass needed" test as the threaded build above.
if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset))
||
((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
(decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
decoder->cfhddata.FrameOffsetX ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0))) // fix: added missing closing parenthesis (condition did not compile; compare the balanced threaded version above)
{
int x,xbytes, xstep;
uint8_t *base = local_output;
float voffsetstep;
float voffset = decoder->cfhddata.channel[1].VerticalOffset;
float roffset = decoder->cfhddata.channel[1].RotationOffset;
float voffset1, voffset2;
float voffsetstep1, voffsetstep2;
int channel_flip = decoder->cfhddata.channel_flip;
int aspectx,aspecty;
float aspectfix;
GetDisplayAspectRatio(decoder, &aspectx, &aspecty);
// NOTE(review): squared aspect ratio presumably folds the pixel aspect
// into the rotation slope -- confirm against the threaded job code.
aspectfix = (float)(aspectx*aspectx) / (float)(aspecty*aspecty);
if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
{
voffset = roffset = 0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
{
channel_flip = 0;
}
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
voffset += decoder->cfhddata.FrameOffsetY;
if(decoder->cfhddata.InvertOffset)
{
voffset = -voffset;
roffset = -roffset;
}
// Row length in bytes and horizontal step size per pixel format.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
xbytes = width*4;
xstep = 16;
break;
case DECODED_FORMAT_RGB24:
xbytes = width*3;
xstep = 16;
break;
case DECODED_FORMAT_YUYV:
xbytes = width*2;
xstep = 16;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
default:
xbytes = width*6;
xstep = 32;
break;
}
//DAN20100923 -- simplified
//voffset += roffset * (float)(width*width) / (float)(height*height) * 0.5;
//voffsetstep = -roffset * (float)(width*width) / (float)(height*height) / (float)(xbytes/xstep);
// Rotation is approximated as a vertical offset that varies linearly
// across the width: start at +half the total tilt, step down per column.
voffset += roffset * aspectfix * 0.5;
voffsetstep = -roffset * aspectfix / (float)(xbytes/xstep);
if(roffset == 0.0)
xstep = xbytes;
voffset1 = voffset2 = voffset;
voffsetstep1 = voffsetstep2 = voffsetstep;
// Mirror/negate the offsets per channel for horizontal/vertical flips.
if(channel_flip & 0xf)
{
if(channel_flip & 2)
{
voffset1 = -voffset1;
voffsetstep1 = -voffsetstep1;
}
if(channel_flip & 8)
{
voffset2 = -voffset2;
voffsetstep2 = -voffsetstep2;
}
if(channel_flip & 1)
{
voffset1 += voffsetstep1*(xbytes/xstep);
voffsetstep1 = -voffsetstep1;
}
if(channel_flip & 4)
{
voffset2 += voffsetstep2*(xbytes/xstep);
voffsetstep2 = -voffsetstep2;
}
}
// Apply the per-column vertical shift across the frame.
for(x=0; x<xbytes; x+=xstep)
{
if(decoder->channel_decodes == 1 && decoder->channel_current == 1) // Right only
{
RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
xstep, height, local_pitch, -voffset2);
}
else
{
RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
xstep, height, local_pitch, voffset1);
}
if(decoder->channel_decodes == 2)
{
uint8_t *bptr = base + channel_offset;
RGB48VerticalShift(decoder, bptr, (unsigned short *)scratch,
xstep, height, local_pitch, -voffset2);
}
base += xstep;
voffset1 += voffsetstep1;
voffset2 += voffsetstep2;
}
}
if(decoder->channel_mix_half_res == 1)
height *= 2;
if(ymin)
{
memset(local_output, 0, abs(local_pitch)); // zero one line;
}
// Rows above/below the frame mask read from the zeroed line (pitch 0);
// rows inside the mask read from the real buffer.
for(y=0; y<ymin; y++)
{
ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
}
for(; y<ymax; y++)
{
ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, local_pitch, channel_offset, y, 0);
}
for(; y<height; y++)
{
ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
}
}
#endif
}
// Decode a sample from the input bitstream into the output frame buffer
bool DecodeSample(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams, CFHDDATA *cfhddata)
{
//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
//CODEC_STATE *codec = &decoder->codec;
//int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 1, 0, 0, 0};
int channel_decodes = 1; // 3D Work
int channel_offset = 0;
int channel_mask = 0;
int channel_current = 0;
//int wavelet_index;
bool result = true;
uint8_t *local_output = output;
uint8_t *local_buffer = NULL;
int local_pitch = pitch;
int internal_format = decoder->frame.format;
int output_format = decoder->frame.output_format;
bool use_local_buffer = false;
DECODER *local_decoder = decoder;
//Frame_Region emptyFrameMask = {0};
Frame_Region emptyFrameMask = FRAME_REGION_INITIALIZER;
int orig_width = decoder->frame.width;
int orig_height = decoder->frame.height;
decoder->local_output = local_output; // used for NV12 decodes.
decoder->sample_uncompressed = 0; // set if a uncompressed sample is found.
decoder->image_dev_only = 0;
if(decoder->flags & (1<<3)) // This is an image development only decode.
{
decoder->sample_uncompressed = 1;
decoder->image_dev_only = 1;
decoder->codec.encoded_format = ENCODED_FORMAT_RGB_444;
decoder->codec.unique_framenumber = 0; //What should this be?
decoder->frame.white_point = 16; // how to we pass this in?
decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentBuffer;
switch(output_format & 0x7fffffff)
{
case COLOR_FORMAT_RGB24:
decoder->uncompressed_size = orig_width * orig_height * 3;
break;
case COLOR_FORMAT_RGB32:
decoder->uncompressed_size = orig_width * orig_height * 4;
break;
case COLOR_FORMAT_RG48:
case COLOR_FORMAT_WP13:
decoder->uncompressed_size = orig_width * orig_height * 6;
break;
default:
decoder->uncompressed_size = orig_width * orig_height * 6;
assert(0);
break;
}
}
decoder->frame.alpha_Companded = 0; // reset this state.
if(decoder->parallelDecoder)
decoder->parallelDecoder->sample_uncompressed = 0;
decoder->error = CODEC_ERROR_OKAY;
input->error = BITSTREAM_ERROR_OKAY;
// first time through encoded_format is not initized.
if(input->nWordsUsed > 4096 && decoder->image_dev_only == 0) // an I-frame is needed
{
SAMPLE_HEADER header;
BITSTREAM input2;
InitBitstreamBuffer(&input2, input->lpCurrentWord, input->nWordsUsed, BITSTREAM_ACCESS_READ);
memset(&header, 0, sizeof(SAMPLE_HEADER));
header.find_lowpass_bands = 2; // help finding the uncompressed flag
if(ParseSampleHeader(&input2, &header))
{
decoder->codec.encoded_format = header.encoded_format;
decoder->sample_uncompressed = header.hdr_uncompressed;
if(decoder->parallelDecoder)
decoder->parallelDecoder->sample_uncompressed = header.hdr_uncompressed;
}
}
if((uintptr_t)input->lpCurrentBuffer & 0x3)
{
if(decoder->aligned_sample_buffer == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
decoder->aligned_sample_buffer =
(uint8_t *)AllocAligned(allocator, (size_t)input->dwBlockLength, 16);
#else
decoder->aligned_sample_buffer =
(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
decoder->aligned_sample_buffer_size = input->dwBlockLength;
}
else
{
if ((size_t)input->dwBlockLength <= decoder->aligned_sample_buffer_size)
{
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
}
else
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
decoder->aligned_sample_buffer =
(uint8_t *)AllocAligned(allocator, input->dwBlockLength, 16);
#else
MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
decoder->aligned_sample_buffer =
(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
decoder->aligned_sample_buffer_size = input->dwBlockLength;
}
}
input->lpCurrentBuffer = decoder->aligned_sample_buffer;
input->lpCurrentWord = decoder->aligned_sample_buffer;
}
#if 0 // Test for missaligning the image data
if(((int)input->lpCurrentBuffer&3) == 0)
{
int i;
uint8_t *ptr = (uint8_t *)input->lpCurrentBuffer;
int missaligned = 1; //2 or 3
for(i=input->dwBlockLength-1; i>=0; i--)
ptr[i+missaligned] = ptr[missaligned];
input->lpCurrentBuffer = (uint8_t *)&ptr[missaligned];
input->lpCurrentWord = (uint8_t *)&ptr[missaligned];
}
#endif
//HACK
// Unfortunately I need color matrix data deep within the codec for RT playback.
if(cfhddata && cfhddata->MagicNumber == CFHDDATA_MAGIC_NUMBER) // valid input
{
if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER)
{
//int size = cfhddata->size;
size_t size = cfhddata->size;
memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
if (size > sizeof(CFHDDATA)) {
// Limit the size to the known structure
size = sizeof(CFHDDATA);
}
memcpy(&decoder->cfhddata, cfhddata, size);
}
}
else
{
unsigned short value;
if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER || decoder->cfhddata.size != sizeof(CFHDDATA))
{
memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
decoder->cfhddata.MagicNumber = CFHDDATA_MAGIC_NUMBER;
decoder->cfhddata.size = sizeof(CFHDDATA);
if(decoder->image_dev_only) // For baseband image only corrections, initize the decoder with defaults
{
decoder->cfhddata.cfhd_subtype = 2; //RGB
decoder->cfhddata.num_channels = 3;
}
else if(GetTuplet(input->lpCurrentBuffer, input->nWordsUsed, CODEC_TAG_INPUT_FORMAT, &value))
{
if(value == COLOR_FORMAT_RG48)
{
decoder->cfhddata.cfhd_subtype = 2; //RGB
decoder->cfhddata.num_channels = 3;
}
else if(value == COLOR_FORMAT_RG64)
{
decoder->cfhddata.cfhd_subtype = 3; //RGBA
decoder->cfhddata.num_channels = 4;
}
else if(value > COLOR_FORMAT_BAYER && value < COLOR_FORMAT_BAYER_END)
{
unsigned int format = BAYER_FORMAT_RED_GRN;
decoder->cfhddata.cfhd_subtype = 1; //BAYER
decoder->cfhddata.bayer_format = format; // default to Red-Grn
decoder->cfhddata.version = CFHDDATA_VERSION;
}
}
}
}
OverrideCFHDDATA(decoder, input->lpCurrentBuffer, input->nWordsUsed);
if(decoder->image_dev_only) // HACK we need to support 3D also.
decoder->source_channels = 1;
else
decoder->source_channels = decoder->real_channels = SkipVideoChannel(decoder, input, 0);
if(!decoder->basic_only && (decoder->cfhddata.MSChannel_type_value || decoder->cfhddata.MSCTV_Override))
{
//int channels = 0;
int channel_blend_type = BLEND_NONE;
int channel_swapped_flags = 0;
if(decoder->cfhddata.MSCTV_Override)
{
channel_mask = decoder->cfhddata.MSCTV_Override&0xff;
channel_blend_type = ((decoder->cfhddata.MSCTV_Override>>8) & 0xff);
channel_swapped_flags = ((decoder->cfhddata.MSCTV_Override>>16) & 0xffff);
}
else
{
channel_mask = decoder->cfhddata.MSChannel_type_value&0xff;
channel_blend_type = ((decoder->cfhddata.MSChannel_type_value>>8) & 0xff);
channel_swapped_flags = ((decoder->cfhddata.MSChannel_type_value>>16) & 0xffff);
}
if(channel_mask != 3)
{
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
//if(channels >= 2) // even "mono" files need to be displayed as Stereo if a 3D mode is selected //DAN20090302
{
if(channel_mask == 1 && decoder->source_channels >= 2) // Decode Left only
{
if(decoder->cfhddata.FramingFlags & 2) // channel swap
{
SkipVideoChannel(decoder, input, 2); // 3D work
}
}
else if(channel_mask == 2 && decoder->source_channels >= 2) // Decode Right only
{
if(decoder->cfhddata.FramingFlags & 2) // channel swap
{
SkipVideoChannel(decoder, input, 1); // 3D work
}
else
{
//assume second channel decode
SkipVideoChannel(decoder, input, 2); // 3D work
}
channel_current = 1;
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
else if(channel_mask == 2 && decoder->source_channels <= 1) // Decode 2D as Right channel
{
channel_current = 1;
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
else if((channel_mask&3) == 3) // A+B 3d work
{
channel_decodes = 2;
decoder->channel_mix_half_res = 0;
if(channel_blend_type != BLEND_NONE)
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
//if(decoder->frame.format == DECODED_FORMAT_W13A)
// {
// decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
// }
//else
//{
// decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
// }
decoder->frame.format = internal_format = DECODED_FORMAT_RGB32;
local_pitch = decoder->frame.width * 4;
}
else
{
decoder->frame.format = internal_format = DECODED_FORMAT_RGB24;
local_pitch = decoder->frame.width * 3; //RGB24
}
/* if(decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
(output_format == DECODED_FORMAT_YUYV ||
output_format == DECODED_FORMAT_UYVY))
{
if( channel_blend_type == BLEND_FREEVIEW ||
((channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC ||
channel_blend_type == BLEND_LINE_INTERLEAVED) && decoder->frame.width > 1280))
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch = (decoder->frame.width) * 3; //RGB24
}
} */
}
/* if(channel_blend_type == BLEND_STEREO_YUY2inRGBA) //YUY2 in RGBA
{
decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
local_pitch = decoder->frame.width * 2; //YUY2
channel_offset = local_pitch * (decoder->frame.height);
use_local_buffer = true;
}*/
/* DAN20120316 FLAG3D_HALFRES broken if(decoder->frame.resolution == DECODED_RESOLUTION_FULL && channel_swapped_flags & FLAG3D_HALFRES && output_format != DECODED_FORMAT_W13A)
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch /= 2;
} */
if( decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
(channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_FREEVIEW))
{
if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
{
if(decoder->sample_uncompressed)
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch /= 2;
}
else
{
if(decoder->preformatted_3D_type > BLEND_NONE)
{
// leave as is.
}
else if(FORMAT8BIT(output_format))
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL;
decoder->frame.width /= 2;
local_pitch /= 2;
}
}
}
else
{
if(FORMAT8BIT(output_format))
decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
//TODO int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
}
if(channel_blend_type >= BLEND_STACKED_ANAMORPHIC && channel_blend_type < BLEND_ANAGLYPH_RC)// stacked, side-by-side, fields, Onion, YUY2
{
channel_offset = local_pitch * (decoder->frame.height);
}
else if(channel_blend_type >= BLEND_ANAGLYPH_RC)
{
/* if(channel_blend_type & 1 && channel_blend_type <= 21) // B&W Anaglyph
{
//B&W using YUYV
decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
local_pitch = decoder->frame.width * 2; //YUY2
}*/
channel_offset = local_pitch * (decoder->frame.height);
use_local_buffer = true;
}
else if(channel_blend_type == BLEND_NONE) // double high
{
channel_offset = pitch * decoder->frame.height;
}
else
{
channel_blend_type = BLEND_STACKED_ANAMORPHIC;
channel_offset = pitch * (decoder->frame.height/2);
}
// fields, stacked, etc, only works on full or half res.
if (channel_blend_type > BLEND_NONE && channel_blend_type <= BLEND_LINE_INTERLEAVED &&
decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY) //thumnbail.
{
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
if (channel_blend_type != BLEND_NONE &&
(output_format == DECODED_FORMAT_BYR1 ||
output_format == DECODED_FORMAT_BYR2 ||
output_format == DECODED_FORMAT_BYR3 ||
output_format == DECODED_FORMAT_BYR4 ))
{
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
}
}
decoder->channel_decodes = channel_decodes;
decoder->channel_blend_type = channel_blend_type;
decoder->channel_swapped_flags = channel_swapped_flags;
}
else
{
decoder->channel_decodes = channel_decodes = 1;
decoder->channel_blend_type = BLEND_NONE;
decoder->channel_swapped_flags = 0;
}
if(cfhddata) // So the P-frames can know the bayerformat
{
//int size = cfhddata->size;
size_t size = cfhddata->size;
if (size > sizeof(CFHDDATA)) {
size = sizeof(CFHDDATA);
}
memcpy(cfhddata, &decoder->cfhddata, size);
}
{
bool doOrientation = true;
bool doFraming = true;
bool doBurins = true;
bool doImageflips = true;
bool doGhostBust = false;
bool doPrimaries = true;
int process_path_flags = decoder->cfhddata.process_path_flags;
int process_path_flags_mask = decoder->cfhddata.process_path_flags_mask;
if(decoder->basic_only)
{
doOrientation = false;
doFraming = false;
doBurins = false;
doImageflips = false;
doPrimaries = false;
}
else
{
if(decoder->cfhddata.process_path_flags_mask)
{
//DAN20101007 --
if(process_path_flags == 0)
decoder->cfhddata.process_path_flags = process_path_flags = decoder->cfhddata.process_path_flags_mask;
process_path_flags &= decoder->cfhddata.process_path_flags_mask;
if(process_path_flags_mask & PROCESSING_ACTIVE2)
{
if(!(process_path_flags_mask & PROCESSING_ORIENTATION))
doOrientation = false;
if(!(process_path_flags_mask & PROCESSING_FRAMING))
doFraming = false;
if(!(process_path_flags_mask & PROCESSING_BURNINS))
doBurins = false;
if(!(process_path_flags_mask & PROCESSING_IMAGEFLIPS))
doImageflips = false;
}
if(!(process_path_flags_mask & PROCESSING_COLORMATRIX))
doPrimaries = false;
}
if(process_path_flags & PROCESSING_ACTIVE2)
{
if(!(process_path_flags & PROCESSING_ORIENTATION))
doOrientation = false;
if(!(process_path_flags & PROCESSING_FRAMING))
doFraming = false;
if(!(process_path_flags & PROCESSING_BURNINS))
doBurins = false;
if(!(process_path_flags & PROCESSING_IMAGEFLIPS))
doImageflips = false;
if(!(process_path_flags & PROCESSING_COLORMATRIX))
doPrimaries = false;
}
}
if(doOrientation)
process_path_flags |= PROCESSING_ORIENTATION;
if(doFraming)
process_path_flags |= PROCESSING_FRAMING;
if(doBurins)
process_path_flags |= PROCESSING_BURNINS;
if(doImageflips)
process_path_flags |= PROCESSING_IMAGEFLIPS;
if(doPrimaries)
process_path_flags |= PROCESSING_COLORMATRIX;
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
doGhostBust = true;
}
}
decoder->cfhddata.process_path_flags = process_path_flags;
if((!decoder->basic_only &&
(doOrientation && ( decoder->cfhddata.channel[0].FloatingWindowMaskL ||
decoder->cfhddata.channel[0].FloatingWindowMaskR ||
decoder->cfhddata.channel[0].FrameKeyStone ||
decoder->cfhddata.channel[0].FrameTilt ||
decoder->cfhddata.channel[0].HorizontalOffset ||
decoder->cfhddata.channel[0].VerticalOffset ||
decoder->cfhddata.channel[0].RotationOffset ||
decoder->cfhddata.channel[1].FloatingWindowMaskL ||
decoder->cfhddata.channel[1].FloatingWindowMaskR ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].FrameTilt ||
decoder->cfhddata.channel[1].HorizontalOffset ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FloatingWindowMaskL ||
decoder->cfhddata.channel[2].FloatingWindowMaskR ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].FrameTilt ||
decoder->cfhddata.channel[2].HorizontalOffset ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0)))
||
(doPrimaries && ( decoder->cfhddata.channel[0].user_blur_sharpen != 0.0 ||
decoder->cfhddata.channel[1].user_blur_sharpen != 0.0 ||
decoder->cfhddata.channel[2].user_blur_sharpen != 0.0))
||
(doFraming && ( decoder->cfhddata.channel[0].user_vignette_start != 0.0 ||
decoder->cfhddata.channel[1].user_vignette_start != 0.0 ||
decoder->cfhddata.channel[2].user_vignette_start != 0.0))
||
(doFraming && ( memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32) ||
decoder->cfhddata.FrameOffsetX ||
decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0))
||
(doGhostBust && (decoder->channel_blend_type == BLEND_NONE) && (channel_decodes == 2))
||
(doImageflips && decoder->cfhddata.channel_flip)
||
(decoder->preformatted_3D_type == BLEND_STACKED_ANAMORPHIC) ||
(decoder->preformatted_3D_type == BLEND_SIDEBYSIDE_ANAMORPHIC) ||
(decoder->channel_blend_type && decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) || // 3D mode generally don't work in quarter res -- this prevents crashes.
( ((decoder->frame.width+7)/8)*8 != decoder->frame.width || (channel_decodes > 1 && decoder->channel_blend_type != BLEND_NONE) ||
decoder->sample_uncompressed) ||
(decoder->cfhddata.doMesh)
)
{
if( output_format == DECODED_FORMAT_BYR1 ||
output_format == DECODED_FORMAT_BYR2 ||
output_format == DECODED_FORMAT_BYR3 ||
output_format == DECODED_FORMAT_BYR4 )
{
// no manipulation should be applied
}
else
{
use_local_buffer = true;
local_pitch = ((decoder->frame.width+7)/8)*8 * 6; //RGB48
if(decoder->image_dev_only)
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
}
else if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
local_pitch = ((decoder->frame.width+7)/8)*8 * 8;
}
else
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
}
if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
local_pitch *= 2; // need horizontal room to make 3D side by side frame
}
/*
if(output_format == DECODED_FORMAT_WP13 || output_format == DECODED_FORMAT_W13A)
{
// preserve HDR
decoder->frame.format = internal_format = output_format;//DECODED_FORMAT_WP13; // HDR output
if(output_format == DECODED_FORMAT_W13A)
local_pitch = decoder->frame.width * 8;
}
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
local_pitch = decoder->frame.width * 8;
}
else
{
decoder->frame.format = internal_format = DECODED_FORMAT_RG48;
}
}*/
channel_offset = local_pitch * (decoder->frame.height);
}
}
}
if(output_format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
{
if(decoder->BYR4LinearRestore == NULL)
{
int j,val;
int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
//int encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
float encode_curvebase;
if(encode_curve_type) //1 or 2
{
if(encode_curve_type & CURVE_TYPE_EXTENDED)
encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
else
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
}
else
{
encode_curve_type = CURVE_TYPE_LOG;
encode_curvebase = 90.0;
}
#if _ALLOCATOR
decoder->BYR4LinearRestore = (unsigned short *)AllocAligned(decoder->allocator,16384*2, 16);
#else
decoder->BYR4LinearRestore = (unsigned short *)MEMORY_ALIGNED_ALLOC(16384*2, 16);
#endif
for(j=0; j<16384; j++) //0 to 1
{
switch(encode_curve_type & CURVE_TYPE_MASK)
{
case CURVE_TYPE_LOG:
val = (int)(CURVE_LOG2LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_GAMMA:
val = (int)(CURVE_GAM2LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_CINEON:
val = (int)(CURVE_CINEON2LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_CINE985:
val = (int)(CURVE_CINE9852LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_PARA:
val = (int)(CURVE_PARA2LIN((float)j/16384.0f,
(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)) * 65535.0f);
break;
case CURVE_TYPE_CSTYLE:
val = (int)(CURVE_CSTYLE2LIN((float)j/16384.0f,
(int)((decoder->cfhddata.encode_curve >> 8) & 0xff)) * 65535.0f);
break;
case CURVE_TYPE_SLOG:
val = (int)(CURVE_SLOG2LIN((float)j/16384.0f) * 65535.0f);
break;
case CURVE_TYPE_LOGC:
val = (int)(CURVE_LOGC2LIN((float)j/16384.0f) * 65535.0f);
break;
case CURVE_TYPE_LINEAR:
default:
val = j;
break;
}
if(val < 0) val = 0;
if(val > 65535) val = 65535;
decoder->BYR4LinearRestore[j] = val;
}
}
}
//DAN20120319 - removed
/*if(decoder->channel_mix_half_res) //decoding half but scaling to double the output size
{
local_pitch *= 2;
channel_offset = local_pitch * (decoder->frame.height*2);
}*/
if(use_local_buffer == true) // need buffer for anaglyph and other 3D presentation formats
{
int stereoframesize = channel_offset * channel_decodes/*stacked frames*/;
if(decoder->source_channels == 1 && decoder->preformatted_3D_type == BLEND_NONE)
stereoframesize = channel_offset;
if(channel_decodes == 1 && decoder->preformatted_3D_type != BLEND_NONE)
stereoframesize = channel_offset * 2;
if(channel_decodes == 2 && decoder->source_channels == 1 && decoder->channel_blend_type != BLEND_NONE)
stereoframesize = channel_offset * 2;
if(decoder->StereoBuffer==NULL || decoder->StereoBufferSize < stereoframesize)
{
#if _ALLOCATOR
if(decoder->StereoBuffer)
{
FreeAligned(decoder->allocator, decoder->StereoBuffer);
decoder->StereoBuffer = NULL;
}
decoder->StereoBuffer = (PIXEL16U *)AllocAligned(decoder->allocator, stereoframesize+256, 16); //DAN20130517 add 256: at 2.7K half-res we write past the buffer's end for zoom; root cause unknown.
#else
if(decoder->StereoBuffer)
{
MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
decoder->StereoBuffer = NULL;
}
decoder->StereoBuffer = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(stereoframesize+256, 16); //DAN20130517 add 256: at 2.7K half-res we write past the buffer's end for zoom; root cause unknown.
#endif
assert(decoder->StereoBuffer != NULL);
if (! (decoder->StereoBuffer != NULL)) {
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->StereoBufferSize = stereoframesize;
}
decoder->StereoBufferFormat = internal_format;
local_buffer = (uint8_t *)decoder->StereoBuffer;
local_output = local_buffer;
}
DecodeEntropyInit(decoder);
//swapped -- Maybe useful for double height decodes.
/* if(channel_decodes == 2 && channel_swapped_flags & FLAG3D_SWAPPED)
{
local_output += channel_offset;
channel_offset = -channel_offset;
}*/
decoder->use_local_buffer = use_local_buffer ? 1 : 0;
if(channel_decodes == 2 && decoder->parallelDecoder == NULL && decoder->source_channels > 1)
{
int encoded_width = decoder->frame.width;
int encoded_height = decoder->frame.height;
if (decoder->frame.resolution == DECODED_RESOLUTION_HALF)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 2;
encoded_height *= 2;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 4;
encoded_height *= 4;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 2;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_VERTICAL)
{
// Compute the encoded dimensions from the frame dimensions
encoded_height *= 2;
}
#if _ALLOCATOR
decoder->parallelDecoder = (DECODER *)Alloc(decoder->allocator, sizeof(DECODER));
if(decoder->parallelDecoder)
{
memset(decoder->parallelDecoder, 0, sizeof(DECODER));
DecodeInit(decoder->allocator, decoder->parallelDecoder, encoded_width, encoded_height,
internal_format, DECODED_RESOLUTION_FULL, NULL);
}
#else
decoder->parallelDecoder = (DECODER *)MEMORY_ALLOC(sizeof(DECODER));
if(decoder->parallelDecoder)
{
memset(decoder->parallelDecoder, 0, sizeof(DECODER));
decoder->parallelDecoder->thread_cntrl = decoder->thread_cntrl;
DecodeInit(decoder->parallelDecoder, encoded_width, encoded_height,
internal_format, DECODED_RESOLUTION_FULL, NULL);
}
#endif
}
// Using the parallel decoder?
if (decoder->parallelDecoder)
{
// Initialize the parallel decoder with parameters from the regular decoder
memcpy(&decoder->parallelDecoder->cfhddata, &decoder->cfhddata, sizeof(CFHDDATA));
memcpy(decoder->parallelDecoder->licensekey,decoder->licensekey, 16);
DecodeEntropyInit(decoder->parallelDecoder);
DecodeOverrides(decoder->parallelDecoder, decoder->overrideData, decoder->overrideSize);
decoder->parallelDecoder->channel_decodes = decoder->channel_decodes;
decoder->parallelDecoder->channel_blend_type = decoder->channel_blend_type;
decoder->parallelDecoder->flags = decoder->flags;
decoder->parallelDecoder->frame = decoder->frame;
decoder->parallelDecoder->use_local_buffer = use_local_buffer ? 1 : 0;
decoder->parallelDecoder->codec.encoded_format = decoder->codec.encoded_format;
if(decoder->parallelDecoder->decoder_thread.pool.thread_count == 0)
{
CreateLock(&decoder->parallelDecoder->decoder_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->parallelDecoder->decoder_thread.pool,
1, //
ParallelThreadProc,
decoder->parallelDecoder);
}
}
if(channel_decodes == 2 && decoder->real_channels > 1 && decoder->parallelDecoder && decoder->parallelDecoder->decoder_thread.pool.thread_count)
{
// Second stream as a thread.
BITSTREAM second_input = *input;
if(decoder->cfhddata.FramingFlags & 2 && decoder->source_channels >= 2) // channel swap
{
BITSTREAM leftEye_input = *input;
SkipVideoChannel(decoder, &leftEye_input, 2); // 3D work
*input = leftEye_input;
SkipVideoChannel(decoder, &second_input, 1); // 3D work
}
else
SkipVideoChannel(decoder, &second_input, 2); // 3D work
decoder->channel_current = 0;
decoder->parallelDecoder->channel_current = 1;
// Instead of reading the metadata databases again, use the ones in the main decoder
OverrideCFHDDATAUsingParent(decoder->parallelDecoder, decoder, input->lpCurrentBuffer, input->nWordsUsed);
// DAN20110404 Use left (first) eye metadata for both eyes (just in case right GUID is bad.)
// OverrideCFHDDATA(decoder->parallelDecoder, input->lpCurrentBuffer, input->nWordsUsed);
//OverrideCFHDDATA(decoder->parallelDecoder, second_input.lpCurrentWord, second_input.nWordsUsed);
// Hack, this gets lost
decoder->parallelDecoder->cfhddata.split_CC_position = decoder->cfhddata.split_CC_position;
#if (_THREADED && _GRAPHICS)
if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
{
if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
{
DrawStartThreaded(decoder);
}
}
#endif
// Post a message to the mailbox
decoder->parallelDecoder->decoder_thread.input = &second_input;
if(use_local_buffer == false &&
(decoder->frame.format == DECODED_FORMAT_RGB32 || decoder->frame.format == DECODED_FORMAT_RGB24))
{
decoder->parallelDecoder->decoder_thread.output = local_output;
local_output += channel_offset;
}
else
{
decoder->parallelDecoder->decoder_thread.output = local_output + channel_offset;
}
decoder->parallelDecoder->decoder_thread.pitch = local_pitch;
decoder->parallelDecoder->decoder_thread.colorparams = colorparams;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->parallelDecoder->decoder_thread.pool, 1);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->parallelDecoder->decoder_thread.pool, THREAD_MESSAGE_START);
// do the first channel
{
TAGVALUE segment;
int sample_type;
#if _THREADED
decoder->entropy_worker_new.next_queue_num = 0;
decoder->entropy_worker_new.threads_used = 0;
#endif
// Get the type of sample
segment = GetTagValue(input);
assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
decoder->error = CODEC_ERROR_BITSTREAM;
STOP(tk_decompress);
return false;
}
sample_type = segment.tuple.value;
switch (sample_type)
{
case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame)
result = DecodeSampleGroup(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group
result = DecodeSampleFrame(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame
result = DecodeSampleIntraFrame(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_SEQUENCE_HEADER:
// The video sequence header is ignored
result = true;
break;
default:
// Need to fill the output frame
//error = CODEC_ERROR_SAMPLE_TYPE;
result = false;
}
}
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->parallelDecoder->decoder_thread.pool);
}
else
{
while(channel_decodes > 0)
{
TAGVALUE segment;
int sample_type;
local_decoder->channel_current = channel_current++;
//OverrideCFHDDATA(local_decoder, input->lpCurrentBuffer, input->nWordsUsed);
#if (_THREADED && _GRAPHICS)
if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
{
if(decoder->cfhddata.BurninFlags & 3) //overlays / tools
{
DrawStartThreaded(decoder);
}
}
#endif
#if _THREADED
local_decoder->entropy_worker_new.next_queue_num = 0;
local_decoder->entropy_worker_new.threads_used = 0;
#endif
if(decoder->image_dev_only)
{
result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
}
else
{
// Get the type of sample
segment = GetTagValue(input);
assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
local_decoder->error = CODEC_ERROR_BITSTREAM;
STOP(tk_decompress);
return false;
}
sample_type = segment.tuple.value;
switch (sample_type)
{
case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame)
result = DecodeSampleGroup(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group
result = DecodeSampleFrame(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame
result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_SEQUENCE_HEADER:
// The video sequence header is ignored
result = true;
break;
default:
// Need to fill the output frame
//error = CODEC_ERROR_SAMPLE_TYPE;
result = false;
}
}
if(ConvertPreformatted3D(decoder, use_local_buffer, internal_format, channel_mask, local_output, local_pitch, &channel_offset))
{
channel_decodes = 0;
}
else
{
channel_decodes--;
local_output += channel_offset;
if(decoder->parallelDecoder)
{
local_decoder = decoder->parallelDecoder;
}
}
}
}
if(use_local_buffer && output)
{
decoder->use_local_buffer = 0;
#if WARPSTUFF
WarpFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
MaskFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
#endif
ConvertLocalToOutput(decoder, output, pitch, output_format, local_buffer, local_pitch, abs(channel_offset));
}
else
{
#if WARPSTUFF
WarpFrame(decoder, output, pitch, output_format);
MaskFrame(decoder, output, pitch, output_format);
#endif
}
if(decoder->channel_mix_half_res) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
decoder->frame.width *= 2;
decoder->frame.height *= 2;
decoder->channel_mix_half_res = 0;
}
if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
decoder->frame.width *= 2;
}
if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
}
#if _GRAPHICS
if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
{
PaintFrame(decoder, output, pitch, output_format);
}
#endif
STOP(tk_decompress);
// Return indication of whether decoding succeeded or failed
return result;
}
// Decode a sample that encoded a group of frames (return the first frame)
//
// Scans tag/value segments from the bitstream, feeding each pair into the
// codec state, until the whole sample is consumed.  Whenever the remainder of
// a channel can be skipped at the current decode resolution, the temporal
// wavelet for that channel is reconstructed (inline, or queued on the
// entropy-worker thread pool when threading is compiled in) and the bitstream
// is advanced directly to the next channel.  On success the first frame of
// the two-frame group is written into the output buffer; on failure the
// output frame is zeroed, decoder->error is set, and false is returned.
//
// Parameters:
//   decoder     - decoder instance (codec state, per-channel transforms, scratch)
//   input       - bitstream positioned at the start of the group sample
//   output      - destination buffer for the first frame of the group
//   pitch       - output row pitch in bytes
//   colorparams - color conversion parameters; not referenced in this body
//                 (presumably consumed by downstream reconstruction -- TODO confirm)
bool DecodeSampleGroup(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int32_t frame_size = decoder->frame.height * pitch;   // bytes cleared on failure
    int resolution = decoder->frame.resolution;
    bool result = true;

    // Map each of the 17 encoded subbands of a group sample to its wavelet in
    // the transform pyramid, and to the band index within that wavelet
    static int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 1, 1, 1, 0, 0, 0};
    static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3};
    int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);

#if (0 && DEBUG)
    // Force quarter resolution decoding for debug that feature
    resolution = DECODED_RESOLUTION_QUARTER;
#endif

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Decoding sample group\n");
    }
#endif

    START(tk_decoding);

    // Initialize the codec state
    InitCodecState(&decoder->codec);

    // Allocate the transform data structure for the group of frames
    AllocDecoderGroup(decoder);

    // Initialize the tables for decoding the wavelet transforms
    InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);

    // Clear the flags in the wavelet transforms
    ClearTransformFlags(decoder);

    // Process the tag value pairs until an encoded subband is found
    for (;;)
    {
        TAGVALUE segment;

        // Read the next tag value pair from the bitstream
        //segment = GetTagValue(input);
        segment = GetSegment(input);
        assert(input->error == BITSTREAM_ERROR_OKAY);
        if (input->error != BITSTREAM_ERROR_OKAY) {
            decoder->error = CODEC_ERROR_BITSTREAM;
            result = false;
            break;
        }

        // Update the codec state with the information in the tag value pair
        {
            TAGWORD tag = segment.tuple.tag;
            TAGWORD value = segment.tuple.value;

            // Use the tag value pair to update the codec state
            error = UpdateCodecState(decoder, input, codec, tag, value);
            assert(error == CODEC_ERROR_OKAY);
            if (error != CODEC_ERROR_OKAY)
            {
                decoder->error = error;
                result = false;
                break;
                //NOTE: Consider moving the error code into the codec state
            }
        }

        // Check whether the group has been decoded
        if (codec->sample_done) break;

        // Skip the rest of the current channel?
        if (CanSkipChannel(decoder, resolution))
        {
            // YUV 4:2:2 output uses only three channels: skip the fourth entirely
            if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
            {
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;
                codec->num_channels = 3;
                goto decoding_complete;
            }
            else
            if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
            {
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;
            }
            else
            {
                // Compute the bitstream position after the current channel
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Get the temporal wavelet
                int temporal_index = 2;
                TRANSFORM *transform = decoder->transform[channel];
                IMAGE *wavelet = transform->wavelet[temporal_index];

#if (0 && DEBUG)
                if (IsBandValid(wavelet, HIGHPASS_BAND))
                {
                    int static count = 0;
                    if (count < 20) {
                        char label[_MAX_PATH];
                        sprintf(label, "Temporal-decode-%d-", count);
                        DumpBandPGM(label, wavelet, HIGHPASS_BAND, NULL);
                    }
                    count++;
                }
#endif

#if _THREADED_DECODER
                // Ready to invert this wavelet to get the lowpass band in the lower wavelet?
                //if (DecodedBandsValid(wavelet, temporal_index))
                if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
                // Have all bands in the temporal wavelet been decoded?
                //if (wavelet && BANDS_ALL_VALID(wavelet))
                if (AllBandsValid(wavelet))
#endif
                {
                    //PIXEL *buffer = (PIXEL *)decoder->buffer;
                    //size_t buffer_size = decoder->buffer_size;
                    int precision = codec->precision;

#if (0 && DEBUG)
                    if (logfile) {
                        fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
                    }
#endif

#if _THREADED_DECODER
                    // Add the temporal inverse transform to the processing queue
                    if(decoder->entropy_worker_new.pool.thread_count)
                    {
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
                            precision, &decoder->scratch, 1);
                        QueueThreadedTransform(decoder, channel, temporal_index);
                    }
                    else
#endif
                    {
                        // Reconstruct the lowpass bands in the first level wavelets
                        //ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
                            precision, &decoder->scratch, 0 );
                    }

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;

                    // Note that the subband flags are also reset when the channel header is decoded
                }
                // Was the wavelet created?
                else if (wavelet == NULL)
                {
                    // The temporal wavelet is not created during quarter resolution decoding

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;
                }

                //TODO: Improve quarter resolution decoding so that the wavelet is created?
            }
        }
    }

decoding_complete:
    STOP(tk_decoding);

#if (0 && DEBUG)
    if (logfile)
    {
        char label[_MAX_PATH];
        int channel;

        for (channel = 0; channel < codec->num_channels; channel++)
        {
            TRANSFORM *transform = decoder->transform[channel];
            IMAGE *wavelet = transform->wavelet[2];
            uint8_t *data = (uint8_t *)wavelet->band[HIGHPASS_BAND];
            int height = wavelet->height;
            int pitch = wavelet->pitch;
            int size = height * pitch;
            int band;

            for (band = 0; band < wavelet->num_bands; band++)
            {
                sprintf(label, "Temporal channel: %d, band: %d", channel, band);
                DumpBandStatistics(label, wavelet, band, logfile);
#if 0
                sprintf(label, "Temporal-channel%d-band%d-", channel, band);
                DumpBandPGM(label, wavelet, band, NULL);
#endif
            }

            assert(size > 0);
            ZeroMemory(data, size);
        }
    }
#endif

    if (result)
    {
        // Two frames have been decoded
        decoder->gop_length = 2;
        decoder->frame_count += 2;

#if (1 && DEBUG)
        if (logfile) {
            fprintf(logfile,
                "DecodeSampleGroup, decoder: 0x%p, GOP length: %d\n",
                decoder, decoder->gop_length);
        }
#endif

        // Return the first frame in the group
        if (!decoder->no_output)
        {
#if 0
            // Decoding to quarter frame resolution at full frame rate?
            if (resolution == DECODED_RESOLUTION_QUARTER)
            {
                int num_channels = codec->num_channels;
                FRAME_INFO *info = &decoder->frame;
                char *buffer = decoder->buffer;
                size_t buffer_size = decoder->buffer_size;

                uint8_t *frame1 = output;
                uint8_t *frame2 = decoder->output2;
                assert(frame2 != NULL);

                // Reconstruct two frames at quarter resolution
                ReconstructQuarterFrame(decoder, num_channels,
                    frame1, frame2, pitch,
                    info, buffer, buffer_size);
            }
            else
#endif
            // Finish computing the output frame
            ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
        }

        if (decoder->error != CODEC_ERROR_OKAY) {
            result = false;
        }

#if TIMING
        // Increment the count of bytes that have been decoded
        decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
    }

    if (!result)
    {
        // Check that the frame can be cleared
        assert(frame_size > 0);
        if (frame_size > 0)
        {
            // Zero the frame
            memset(output, 0, frame_size);
        }
    }

    return result;
}
// Decode a sample that represents the second frame in a group
//
// Scans tag/value pairs from the bitstream (updating the codec state) until
// the frame-index tag marks the end of the frame header, then reconstructs
// the frame to display: the second frame of the GOP when two frames were
// decoded, otherwise the first.  On any failure the output frame is zeroed,
// decoder->error is set, and false is returned.
bool DecodeSampleFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
    CODEC_ERROR status = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int32_t output_size = decoder->frame.height * pitch;  // bytes cleared on failure
    bool ok = true;

    START(tk_decoding);

    // Consume tag/value pairs up to the end of the frame header
    while (ok)
    {
        TAGVALUE pair = GetSegment(input);
        assert(input->error == BITSTREAM_ERROR_OKAY);
        if (input->error != BITSTREAM_ERROR_OKAY)
        {
            decoder->error = CODEC_ERROR_BITSTREAM;
            ok = false;
            break;
        }

        // Fold this tag/value pair into the codec state
        status = UpdateCodecState(decoder, input, codec, pair.tuple.tag, pair.tuple.value);
        assert(status == CODEC_ERROR_OKAY);
        if (status != CODEC_ERROR_OKAY)
        {
            decoder->error = status;
            ok = false;
            break;
        }

        // End of the frame header?
        if (pair.tuple.tag == CODEC_TAG_FRAME_INDEX)
        {
            break;
        }
    }

    STOP(tk_decoding);

#if (1 && DEBUG)
    if (logfile) {
        fprintf(logfile,
            "DecodeSampleFrame, decoder: 0x%p, GOP length: %d\n",
            decoder, decoder->gop_length);
    }
#endif

    if (ok)
    {
        if (decoder->gop_length > 0)
        {
            // Display the second frame when the group holds two, else the first
            int display_index = (decoder->gop_length >= 2) ? 1 : 0;
            ReconstructSampleFrameToBuffer(decoder, display_index, output, pitch);
            if (decoder->error != CODEC_ERROR_OKAY)
            {
                ok = false;
            }
        }

#if TIMING
        // Increment the count of bytes that have been decoded
        decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
    }

    if (!ok)
    {
        // Blank the output so an unhandled frame type does not show garbage
        assert(output_size > 0);
        if (output_size > 0)
        {
            memset(output, 0, output_size);
        }
    }

    return ok;
}
// Decode a sample that encodes an intra frame
//
// Same tag/value scanning structure as DecodeSampleGroup, but the sample
// holds a single frame, so each channel uses a three-level spatial pyramid
// (no temporal wavelet).  On success the decoded frame is written to the
// output buffer (with a dedicated fast path for quarter-resolution,
// non-Bayer decodes); on failure the output frame is zeroed, decoder->error
// is set, and false is returned.
//
// Parameters:
//   decoder     - decoder instance (codec state, per-channel transforms, scratch)
//   input       - bitstream positioned at the start of the intra-frame sample
//   output      - destination frame buffer
//   pitch       - output row pitch in bytes
//   colorparams - color conversion parameters; not referenced in this body
//                 (presumably consumed by downstream reconstruction -- TODO confirm)
bool DecodeSampleIntraFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int32_t frame_size = decoder->frame.height * pitch;   // bytes cleared on failure
    int resolution = decoder->frame.resolution;
    bool result = true;

    // Map each encoded subband to its wavelet and to the band within it.
    // NOTE(review): subband_band_index holds 16 entries while
    // subband_wavelet_index holds 10; only the first num_subbands entries
    // are passed along, so the extras appear unused -- confirm intent.
    static int subband_wavelet_index[] = {2, 2, 2, 2, 1, 1, 1, 0, 0, 0};
    static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3};
    int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);

    START(tk_decoding);

    // Image-development-only mode skips bitstream decoding entirely
    if(decoder->image_dev_only) goto decoding_completeI;

    // Initialize the codec state
    InitCodecState(&decoder->codec);

    // Allocate the transform data structure for the group of frames
    AllocDecoderGroup(decoder);

    // Initialize the tables for decoding the wavelet transforms
    InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);

    // Clear the flags in the wavelet transforms
    ClearTransformFlags(decoder);

    //Force V210 output for debugging ***DEBUG***
    //decoder->frame.format = DECODED_FORMAT_V210;

    // Process the tag value pairs until an encoded subband is found
    for (;;)
    {
        TAGVALUE segment;

        // Read the next tag value pair from the bitstream
        segment = GetSegment(input);
        assert(input->error == BITSTREAM_ERROR_OKAY);
        if (input->error != BITSTREAM_ERROR_OKAY) {
            decoder->error = CODEC_ERROR_BITSTREAM;
            result = false;
            break;
        }

        {
            TAGWORD tag = segment.tuple.tag;
            TAGWORD value = segment.tuple.value;

            // Use the tag value pair to update the codec state
            error = UpdateCodecState(decoder, input, codec, tag, value);
            assert(error == CODEC_ERROR_OKAY);
            if (error != CODEC_ERROR_OKAY) {
                decoder->error = error;
                result = false;
                break;
                //NOTE: Consider moving the error code into the codec state
            }
        }

        // Check whether the group has been decoded
        if (codec->sample_done) {
            break;
        }

        // Skip the rest of the current channel?
        if (CanSkipChannel(decoder, resolution))
        {
            // YUV 4:2:2 output uses only three channels: skip the fourth entirely
            if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
            {
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;
                codec->num_channels = 3;
                goto decoding_completeI;
            }
            else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
            {
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;
            }
            else
            {
                // Compute the bitstream position after the current channel
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Get the highest wavelet in the pyramid
                int wavelet_index = 2;
                TRANSFORM *transform = decoder->transform[channel];
                IMAGE *wavelet = transform->wavelet[wavelet_index];

#if _THREADED_DECODER
                // Ready to invert this wavelet to get the lowpass band in the lower wavelet?
                //if (DecodedBandsValid(wavelet, temporal_index))
                if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
                // Have all bands in the wavelet been decoded?
                if (AllBandsValid(wavelet))
#endif
                {
                    //PIXEL *buffer = (PIXEL *)decoder->buffer;
                    //size_t buffer_size = decoder->buffer_size;
                    int precision = codec->precision;

#if (0 && DEBUG)
                    if (logfile) {
                        char label[_MAX_PATH];
                        int band;

                        sprintf(label, "Channel: %d, index: %d", channel, wavelet_index);
                        DumpImageStatistics(label, wavelet, logfile);

#if 1
                        for (band = 1; band < wavelet->num_bands; band++)
                        {
                            sprintf(label, "Channel: %d, index: %d, band: %d", channel, wavelet_index, band);
                            DumpBandStatistics(label, wavelet, band, logfile);
                        }
#endif
                    }
#endif

#if (0 & DEBUG)
                    if (logfile) {
                        fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
                    }
#endif

#if _THREADED_DECODER
                    // Add the inverse spatial transform to the processing queue
                    if(decoder->entropy_worker_new.pool.thread_count)
                    {
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
                            precision, &decoder->scratch, 1);
                        QueueThreadedTransform(decoder, channel, wavelet_index);
                    }
                    else
#endif
                    {
                        // Reconstruct the lowpass bands in the first level wavelets
                        //ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
                            precision, &decoder->scratch, 0);
                    }

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;

                    // Note that the subband flags are also reset when the channel header is decoded
                }
                // Was the wavelet created?
                //else if (wavelet == NULL)
                else
                {
                    // The wavelet may not have been created during quarter resolution decoding

                    // The wavelet should have been created if all bands are valid
                    assert(wavelet != NULL);

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;
                }

                //TODO: Improve quarter resolution decoding so that the wavelet is created?
            }
        }
    }

decoding_completeI:
    STOP(tk_decoding);

    if (result)
    {
        // One frame has been decoded
        decoder->gop_length = 1;
        decoder->frame_count += 1;

#if (0 && DEBUG)
        if (logfile) {
            fprintf(logfile,
                "DecodeSampleIntraFrame, decoder: 0x%p, GOP length: %d\n",
                decoder, decoder->gop_length);
        }
#endif

        // Return the first frame (the only frame that was decoded)
        if (!decoder->no_output)
        {
            int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;

            // Quarter-resolution non-Bayer decodes take a dedicated fast path
            if ( !uncompressed && resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
            {
                //CODEC_STATE *codec = &decoder->codec;
                TRANSFORM **transform_array = decoder->transform;
                int num_channels = codec->num_channels;
                //int progressive = codec->progressive;
                FRAME_INFO *info = &decoder->frame;
                int precision = codec->precision;

#if _THREADED_DECODER
                // Wait until the transform thread has finished all pending transforms
                WaitForTransformThread(decoder);
#endif

                ConvertQuarterFrameToBuffer(decoder, transform_array, num_channels, output, pitch, info, precision);
            }
            else
            {
                // Finish computing the output frame
                ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
            }
        }

        if (decoder->error != CODEC_ERROR_OKAY) {
            result = false;
        }

#if TIMING
        // Increment the count of bytes that have been decoded
        decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
    }

    if (!result)
    {
        // Check that the frame can be cleared
        assert(frame_size > 0);
        if (frame_size > 0)
        {
            // Zero the frame
            memset(output, 0, frame_size);
        }
    }

    return result;
}
// Decode a sample channel header
//
// Parses the header that separates channels in the bitstream, seeds the
// transform of the upcoming channel from the one just finished, and resets
// the per-channel decoding state (subband counter and decoded-subband flags).
// Returns false if the header could not be decoded.
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
CHANNEL_HEADER header;
// Transform for the channel that has just been decoded
TRANSFORM *previous_transform = decoder->transform[codec->channel];
// Index of the channel that follows in the bitstream
int next_channel = codec->channel + 1;
// Parse the remaining channel header fields from the bitstream
CODEC_ERROR error = DecodeChannelHeader(input, &header, SAMPLE_TYPE_CHANNEL);
decoder->error = error;
assert(error == CODEC_ERROR_OKAY);
if (error != CODEC_ERROR_OKAY) {
return false;
}
// The decoder is not able to skip channels
assert(header.channel == next_channel);
// Seed the next channel's transform from the previous one
InitChannelTransform(decoder->transform[next_channel], previous_transform);
// Commit the new channel and reset the per-channel decoding state
codec->channel = next_channel;
codec->band.subband = 0;
codec->decoded_subband_flags = 0;
return true;
}
// Decode the coefficients in a subband
//
// Reads one subband from the bitstream into the wavelet that contains it.
// Three cases are distinguished by the subband number:
//   subband == 255  -- an empty band (the temporal highpass placeholder),
//   subband > 0     -- a regular highpass band,
//   subband == 0    -- the lowpass band.
// After a successful decode the subband is recorded in the decoded-subband
// flags and, once every band of the containing wavelet has arrived, the
// routine reconstructs (or, in threaded builds, queues reconstruction of)
// the next lower wavelet in the transform.
// Returns true if the subband was decoded successfully.
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
int channel = codec->channel;
TRANSFORM *transform = decoder->transform[channel];
// Table mapping a subband number to the index of the wavelet that holds it
int *subband_wavelet_index = decoder->subband_wavelet_index;
// Used for quarter resolution and threaded decoding
int transform_type = transform->type;
// Wavelet parameters
int width;
int height;
int level;
int type;
int band;
int threading = 1;
// Wavelet containing the band to decode
int index;
IMAGE *wavelet = NULL;
bool result;
// Subbands 7 through 10 of the fieldplus transform are decoded unthreaded
if(subband >= 7 && subband <= 10 && transform_type == TRANSFORM_TYPE_FIELDPLUS)
threading = 0;
// Update the transform data structure from the codec state
UpdateCodecTransform(transform, codec);
// Is this an empty band?
if (subband == 255)
{
// Decode an empty band
// This wavelet is the temporal wavelet
index = 2;
wavelet = transform->wavelet[index];
// Get the wavelet parameters decoded from the bitstream
width = codec->band.width;
height = codec->band.height;
level = codec->highpass.wavelet_level;
type = codec->highpass.wavelet_type;
band = codec->band.number;
// The empty band should be the highpass band in a temporal wavelet
assert(type == WAVELET_TYPE_TEMPORAL && band == 1);
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
// Save this wavelet in the transform data structure
transform->wavelet[index] = wavelet;
#endif
// Set the wavelet parameters
wavelet->pixel_type[band] = PIXEL_TYPE_16S;
wavelet->num_bands = 2;
result = DecodeSampleEmptyBand(decoder, input, wavelet, band);
// Set the subband number for the next band expected in the bitstream
codec->band.subband = 11;
}
// Is this a highpass band?
else if (subband > 0)
{
// Decode a highpass band
// Get the wavelet that contains this subband
index = subband_wavelet_index[subband];
wavelet = transform->wavelet[index];
// Get the wavelet parameters decoded from the bitstream
width = codec->band.width;
height = codec->band.height;
level = codec->highpass.wavelet_level;
type = codec->highpass.wavelet_type;
band = codec->band.number;
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
// Save this wavelet in the transform data structure
transform->wavelet[index] = wavelet;
#endif
result = DecodeSampleHighPassBand(decoder, input, wavelet, band, threading);
if (result)
{
// Call thread safe routine to update the band valid flags
UpdateWaveletBandStartedFlags(decoder, wavelet, band);
}
// Reset the default encoding method
codec->band.encoding = BAND_ENCODING_RUNLENGTHS;
// Set the subband number for the next band expected in the bitstream
codec->band.subband = subband + 1;
}
else
{
// Decode a lowpass band
// Get the wavelet that contains this subband
index = subband_wavelet_index[0];
wavelet = transform->wavelet[index];
// Get the wavelet parameters decoded from the bitstream
width = codec->lowpass.width;
height = codec->lowpass.height;
level = codec->lowpass.level;
type = codec->first_wavelet;
//band = codec->band.number;
band = 0;
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
// Save this wavelet in the transform data structure
transform->wavelet[index] = wavelet;
#endif
// The lowpass data is always stored in wavelet band zero
assert(band == 0);
// The lowpass band must be subband zero
assert(subband == 0);
result = DecodeSampleLowPassBand(decoder, input, wavelet);
if (result)
{
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, wavelet, band);
}
// Set the subband number for the next band expected in the bitstream
codec->band.subband = subband + 1;
}
// Was the subband successfully decoded?
if (result)
{
// The transform will set the band valid flag if this is the temporal wavelet
//if (index != 2)
// Record that this subband has been decoded successfully
if (0 <= subband && subband <= CODEC_MAX_SUBBAND)
codec->decoded_subband_flags |= DECODED_SUBBAND_MASK(subband);
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded subband: %d, wavelet: %d, channel: %d\n",
subband, index, channel);
}
#endif
}
#if _THREADED_DECODER
// Ready to queue a threaded transform to invert this wavelet?
if (BANDS_ALL_STARTED(wavelet))
{
// Are frames being decoded to quarter resolution?
if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
{
// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
int highest_index = 5;
if (transform_type == TRANSFORM_TYPE_SPATIAL)
{
// Smallest wavelet in the spatial transform
highest_index = 2;
}
// Only the smallest spatial wavelet must be reconstructed
if (index != highest_index) {
return result;
}
//TODO: Can we improve on the current scheme for quarter resolution decoding?
}
if ((transform->type == TRANSFORM_TYPE_SPATIAL && index > 0) || index >= 2)
{
if(decoder->entropy_worker_new.pool.thread_count && threading)
{
// Perform the allocations on this thread, then hand the inverse
// transform itself to the worker pool (allocations_only flag is 1)
ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
codec->precision, &decoder->scratch, 1);
// Add the inverse wavelet transform to the processing queue
QueueThreadedTransform(decoder, codec->channel, index);
}
else
{
// Apply the inverse wavelet transform to reconstruct the lower level wavelet
ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
codec->precision, &decoder->scratch, 0);
}
}
}
#else
// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
if (BANDS_ALL_VALID(wavelet))
{
int channel = codec->channel;
//PIXEL *buffer = (PIXEL *)decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
int band;
sprintf(label, "Channel: %d, index: %d", channel, index);
DumpImageStatistics(label, wavelet, logfile);
#if 1
for (band = 1; band < wavelet->num_bands; band++)
{
sprintf(label, "Channel: %d, index: %d, band: %d", channel, index, band);
DumpBandStatistics(label, wavelet, band, logfile);
}
#endif
}
#endif
// Are frames being decoded to quarter resolution?
if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
{
// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
int highest_index = 5;
if (transform_type == TRANSFORM_TYPE_SPATIAL)
{
// Smallest wavelet in the spatial transform
highest_index = 2;
}
// Only the smallest spatial wavelet must be reconstructed
if (index != highest_index) {
return result;
}
//TODO: Can we improve on the current scheme for quarter resolution decoding?
}
// Apply the inverse wavelet transform to reconstruct the lower level wavelet
ReconstructWaveletBand(decoder, transform, channel, wavelet, index, precision, &decoder->scratch, 0);
}
#endif
return result;
}
// Decode the coefficients in a lowpass band
//
// Reads the lowpass (DC) band of the current channel into wavelet band zero.
// Three paths are used depending on how the encoder packed the band:
//   1) 16 bits per pixel, word-aligned bitstream, even width: pairs of
//      coefficients are read directly from the bitstream as big-endian words,
//   2) 8 bits per pixel, word-aligned bitstream: bytes are read directly and
//      inverse quantized,
//   3) otherwise: a generic GetBits() loop performs the inverse quantization.
// A per-channel offset compensates for rounding differences between encoded
// precisions and output formats (see the DANyyyymmdd change notes).
// Always returns true (result is initialized true and never cleared here).
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
int channel = codec->channel;
bool result = true;
int lowpass_width; // Lowpass band dimensions
int lowpass_height;
int lowpass_pitch;
PIXEL *pLowPassRow; // Pointer into the lowpass band
//int wavelet_width; // Dimensions of the wavelet image
//int wavelet_height;
int bits_per_pixel;
int quantization;
int offset;
//int pixel_divisor = (1 << (2 * codec->lowpass.level));
int row, column;
// Sentinel -1 means "no solid color"; otherwise the whole band is one value
int32_t solid_color = -1;
const int gain = 128;
const int colorshift = 0;
// int channelgain[4];
//int waterrow=19, watercol=214;
//int cspace = decoder->frame.colorspace;
// Lowpass image dimensions may be smaller than the wavelet dimensions
// because the encoder may have transmitted an image without the border
lowpass_width = codec->lowpass.width;
lowpass_height = codec->lowpass.height;
lowpass_pitch = wavelet->pitch/sizeof(PIXEL);
pLowPassRow = wavelet->band[0];
// Get the parameters for quantization performed by the encoder
quantization = codec->lowpass.quantization;
offset = codec->lowpass.pixel_offset;
bits_per_pixel = codec->lowpass.bits_per_pixel;
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decode lowpass subband\n");
}
#endif
// Fast path: 16-bit coefficients read two at a time as 32-bit words
if (bits_per_pixel == 16 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE && !(lowpass_width&1))
{
int32_t *lpCurrentLong = (int32_t *)stream->lpCurrentWord;
//int signval = 0;
//int channel3stats = 0;
int channeloffset = 0;
if(decoder->codec.precision == 8)
{
channeloffset = (codec->num_frames==2 ? 64 : 32);
}
else if(decoder->codec.precision == 10)
{
switch(decoder->frame.format)
{
case DECODED_FORMAT_YU64:
case DECODED_FORMAT_YR16:
case DECODED_FORMAT_V210:
channeloffset = codec->num_frames==2 ? 14 : 4;//DAN20090601, recal I-frame DAN20110301
break;
default:
channeloffset = codec->num_frames==2 ? 48 : 24;//DAN20090601
}
if(decoder->sample_uncompressed) //DAN20110301 was testing the GOP length for this (why?)
channeloffset = 0; //DAN20100822 -- Prevent offset between uncompressed V210 and compressed frames
}
else if(decoder->codec.precision == 12)
{
switch(decoder->frame.format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB24_INVERTED:
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB32_INVERTED:
channeloffset = 8; //DAN200906010
break;
// 16-bit precision:
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_W13A:
channeloffset = 0;
break;
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
channeloffset = 6; //DAN200906010 //DAN20100822 -- prefect for uncompressed to compressed.
break;
default:
channeloffset = 0;
break;
}
}
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) //DAN20090728 -- Prevent offset between uncompressed and compressed RAW frames
channeloffset = 0;
#define DUMPLL 0
#if (_DEBUG && DUMPLL)
FILE *fp;
if(channel == 0)
{
static int inc = 1;
char name[256];
sprintf(name,"C:\\Cedoc\\LLdec%03d.pgm", inc++);
fp = fopen(name,"w");
fprintf(fp, "P2\n# CREATOR: DAN\n%d %d\n255\n", lowpass_width, lowpass_height);
}
#endif
#if LOSSLESS
channeloffset = 0; //LOSSLESS
#endif
// Solid-color escape: a marker word of all ones followed by the color
// and the band dimensions means the whole band is a single value
//if(lpCurrentLong[0] == 0xffffffff)
if(lpCurrentLong[0] == (int32_t)UINT32_MAX)
{
if(SwapInt32BtoN(lpCurrentLong[2]) == (uint32_t)lowpass_width)
{
if(SwapInt32BtoN(lpCurrentLong[3]) == (uint32_t)lowpass_height)
{
solid_color = SwapInt32BtoN(lpCurrentLong[1]);
solid_color |= (solid_color<<16);
lpCurrentLong += 4;
}
}
}
// Decode each row in the lowpass image
for (row = 0; row < lowpass_height; row++)
{
// Holds a pair of 16-bit coefficients; loaded on even columns,
// the low half is consumed on the following odd column
int pixels;
// Start at the first column
column = 0;
// Process the rest of the row
{
for (; column < lowpass_width; column++)
{
int pixel_value;
//int i;
// Perform inverse quantization
if(column & 1)
{
pixel_value = pixels;
}
else
{
//pixels = _bswap(*(lpCurrentLong++));
if(solid_color == -1)
pixels = SwapInt32BtoN(*(lpCurrentLong++));
else
pixels = solid_color;
pixel_value = (pixels>>16);
// NOTE(review): sign extends the low 16 bits; relies on
// arithmetic shifts of a signed int (implementation-defined,
// and the left shift of a negative value is formally UB) --
// holds on the compilers this codec targets
pixels <<= 16;
pixels >>= 16;
}
// Store the pixel in the lowpass band of the wavelet
pixel_value += channeloffset;
// pixel_value -= 64;
// pixel_value += ((rand() & 0x7fff) - 0x4000);
// if(pixel_value < 0) pixel_value = 0;
// Clamp to the maximum positive 16-bit signed value
if(pixel_value > 0x7fff) pixel_value = 0x7fff;
pLowPassRow[column] = pixel_value;
#if (_DEBUG && DUMPLL)
if(channel==0 && fp)
fprintf(fp, "%d\n", pixel_value>>7);
#endif
}
}
// Advance to the next row in the lowpass image
pLowPassRow += lowpass_pitch;
}
#if (_DEBUG && DUMPLL)
if(channel == 0 && fp)
fclose(fp);
#endif
#if ERROR_TOLERANT
// Update the count of bytes used
stream->nWordsUsed -= (int)(((intptr_t)lpCurrentLong - (intptr_t)stream->lpCurrentWord));
#endif
// Update the bitstream
stream->lpCurrentWord = (uint8_t *)lpCurrentLong;
}
// Fast path: 8-bit coefficients read directly from the bitstream bytes
else if (bits_per_pixel == 8 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE)
{
uint8_t *lpCurrentByte = (uint8_t *)stream->lpCurrentWord;
//int signval = 0;
// Decode each row in the lowpass image
for (row = 0; row < lowpass_height; row++)
{
// Start at the first column
column = 0;
// Process the rest of the row
for (; column < lowpass_width; column++)
{
int pixel_value = *(lpCurrentByte++);
// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
if (channel == 0)
pixel_value = (quantization * pixel_value) + offset;
else
pixel_value = (pixel_value - offset) * quantization;
#else
pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
// Apply the gain about a midpoint of 128 quantization steps
pixel_value -= 128 * quantization;
pixel_value *= gain;
pixel_value >>= 7;
pixel_value += 128 * quantization;
pixel_value += colorshift;
// Store the pixel in the lowpass band of the wavelet
// Multiply by 16 to turn 8-bit into the new 16-bit format
pLowPassRow[column] = pixel_value * 16;
}
// Advance to the next row in the lowpass image
pLowPassRow += lowpass_pitch;
}
#if ERROR_TOLERANT
// Update the count of bytes used
stream->nWordsUsed -= (int)(((intptr_t)lpCurrentByte - (intptr_t)stream->lpCurrentWord));
#endif
// Update the bitstream
stream->lpCurrentWord = (uint8_t *)lpCurrentByte;
}
// General path: read each coefficient through the bitstream routines
else
{
int channeloffset = 0;
if(decoder->codec.precision == 8)
{
channeloffset = (codec->num_frames==2 ? 64 : 32);
}
else if(decoder->codec.precision == 10)
{
channeloffset = (codec->num_frames==2 ? 10 : 5);
}
else if(decoder->codec.precision == 12)
{
// channeloffset = (codec->num_frames==2 ? 4 : 2); // Seems to result in less shift using the viper images
}
//DAN20050923 no longer trying to compensate for YUV to RGB issues.
if(decoder->frame.format == DECODED_FORMAT_RGB24 || decoder->frame.format == DECODED_FORMAT_RGB32)
{
if(decoder->codec.precision == 8)
{
switch(channel)
{
case 0: channeloffset += 8; break; // fixed rounding error introduced by YUV->RGB
case 1: channeloffset += 16; break;
case 2: channeloffset += 10; break;
}
}
else if(decoder->codec.precision == 10)
{
switch(channel)
{
case 0: channeloffset += -8; break; // fixed rounding error introduced by YUV->RGB
case 1: channeloffset += -4; break;
case 2: channeloffset += -4; break;
}
}
else if(decoder->codec.precision == 12)
{
switch(channel)
{
case 0: channeloffset += 0; break; // fixed rounding error introduced by YUV->RGB
case 1: channeloffset += 0; break;
case 2: channeloffset += 0; break;
}
}
}
if(bits_per_pixel != 16)
channeloffset = 0;
for (row = 0; row < lowpass_height; row++)
{
for (column = 0; column < lowpass_width; column++) {
int pixel_value = GetBits(stream, bits_per_pixel);
// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
if (channel == 0)
pixel_value = (quantization * pixel_value) + offset;
else
pixel_value = (pixel_value - offset) * quantization;
#else
pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
// Store the pixel in the lowpass band of the wavelet
pLowPassRow[column] = SATURATE(pixel_value + channeloffset); // DAN20050926 added chromaoffet to match the normal path -- this code will be used for SD (720) encodes
}
// Account for the bytes consumed by this row of coefficients
stream->nWordsUsed -= lowpass_width*(bits_per_pixel>>3);
// Advance to the next row in the lowpass image
pLowPassRow += lowpass_pitch;
}
}
// Set the wavelet scale factor
wavelet->scale[0] = quantization;
// Align the bitstream to the next tag value pair
AlignBitsTag(stream);
// Return indication of lowpass decoding success
return result;
}
// Decode the coefficients in a highpass band
//
// Reads one highpass band into the given wavelet band using the encoding
// method recorded in the codec state: lossless FSM coding, raw 16-bit
// coefficients, or (the default) runlength coding.  The band scale and
// quantization from the codec state are copied into the wavelet, and the
// band trailer is decoded and validated afterwards.
// Returns false if decoding the coefficients or the trailer fails.
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
//int channel = codec->channel;
//int subband = codec->band.subband;
//int index = codec->highpass.wavelet_number;
int width;
int height;
int quantization;
// The encoder may not have used variable-length coding
int method = codec->band.encoding;
bool result = true;
// Check that the band index is in range
assert(0 <= band && band <= codec->max_subband);
// Encoded coefficients start on a tag boundary
AlignBitsTag(stream);
#if (0 && DEBUG)
// Dump the band header to the logfile
if (logfile) {
fprintf(logfile,
"Band header marker: 0x%04X, subband: %d, width: %d, height: %d, encoding: %d\n",
header->marker, header->subband, header->width, header->height, header->encoding);
}
#endif
// Copy the scale factors used by the encoder into the wavelet band
// (Zero means that the encoder did not supply this parameter)
if (codec->band.scale > 0) {
wavelet->scale[band] = codec->band.scale;
}
// Get the quantization factor that was used to encode the band coefficients
quantization = codec->band.quantization;
// Copy the quantization into the wavelet
wavelet->quantization[band] = quantization;
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decode highpass subband: %d, quantization: %d\n", subband, quantization);
}
#endif
// Get the highpass band dimensions
width = codec->band.width;
height = codec->band.height;
// Is this a special band for the temporal high pass thumbnail?
if (method == BAND_ENCODING_LOSSLESS)
{
//lossless temporal subband //DAN20060701
result = DecodeBand16sLossless(decoder, stream, wavelet, band, width, height);
assert(result);
if (result)
{
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, wavelet, band);
}
}
else if (method == BAND_ENCODING_16BIT)
{
//lossless temporal subband //DAN20060701
result = DecodeBand16s(decoder, stream, wavelet, band, width, height);
assert(result);
if (result)
{
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, wavelet, band);
}
}
else
{
// Must use the runlength encoding method
assert(codec->band.encoding == BAND_ENCODING_RUNLENGTHS);
#if 0
// This code attempts to not decode various subbands for 1/4 res decodes.
// Unfortunately playback would stop after 5 seconds with this code (but not in debug mode.)
if (subband >= 4 && subband <= 6)
{
TAGVALUE segment;
AlignBitsTag(stream);
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
else
#elif 0
// Is this subband required for decoding the frame?
if (CanSkipSubband(decoder, subband))
{
// Skip past the end of this subband
SkipSubband(stream);
}
#endif
// Decode this subband
result = DecodeFastRunsFSM16s(decoder, stream, wavelet, band, width, height, threading);
}
// Return failure if a problem was encountered while reading the band coefficients
if (!result) return result;
// The encoded band coefficients end on a bitstream word boundary
// to avoid interference with the marker for the coefficient band trailer
AlignBits(stream);
// Decode the band trailer
error = DecodeBandTrailer(stream, NULL);
decoder->error = error;
assert(error == CODEC_ERROR_OKAY);
if (error != CODEC_ERROR_OKAY) {
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Error in band %d trailer: %d\n", band, error);
}
#endif
return false;
}
return result;
}
// Decode an empty band
//
// An empty band carries no coefficients in the bitstream: only the scale and
// quantization recorded in the codec state are copied into the wavelet band,
// and the band trailer is decoded and validated.  The caller is expected to
// have set the band's pixel type to 16-bit signed before calling.
// Returns false if the band trailer could not be decoded.
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
int quantization;
// Check that the band is in range
assert(0 <= band && band <= CODEC_MAX_HIGHBANDS);
// Check that the highpass band is 16 bits
assert(wavelet->pixel_type[1] == PIXEL_TYPE_16S);
#if (0 && DEBUG)
//TODO: Change format string to handle 64-bit pointers
if (logfile) {
fprintf(logfile, "Start decoding an empty band, stream: 0x%p\n", stream->lpCurrentWord);
}
#endif
// Encoded coefficients must start on a word boundary
AlignBits(stream);
// Copy the scale factors used by the encoder into the wavelet band
// (Zero means that the encoder did not supply the parameter)
if (codec->band.scale > 0)
wavelet->scale[band] = codec->band.scale;
// Set the quantization used to encode the band coefficients
quantization = codec->band.quantization;
wavelet->quantization[band] = quantization;
#if (0 && DEBUG)
if (logfile) {
DumpBits(stream, logfile);
}
#endif
// Decode the band trailer
error = DecodeBandTrailer(stream, NULL);
decoder->error = error;
assert(error == CODEC_ERROR_OKAY);
if (error != CODEC_ERROR_OKAY) {
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Error in band: %d, error: %d\n", band, error);
}
#endif
return false;
}
// The encoded band coefficients end on a bitstream word boundary
// to avoid interference with the marker for the coefficient band trailer
AlignBits(stream);
#if (0 && DEBUG)
// Dump the band trailer to the logfile
if (logfile) {
fprintf(logfile, "Band trailer marker: 0x%04X\n", trailer->marker);
}
#endif
#if (0 && DEBUG)
if (logfile) {
//TODO: Change format string to handle 64-bit pointers
fprintf(logfile, "End decode empty band, stream: 0x%X\n", stream->lpCurrentWord);
}
#endif
return true;
}
// Decode a band that was encoded as raw 16-bit signed coefficients
//
// The coefficients are stored big endian in the bitstream.  Each value is
// byte swapped into the wavelet band and multiplied by the dequantization
// factor already recorded in the wavelet (a fast copy path is used when no
// dequantization is required).  The decoder argument is unused but kept for
// signature parity with the other band decoding routines.
// Always returns true.
//
// Fix: the fast path previously did stream->nWordsUsed += width*2 while
// advancing lpCurrentWord; every other consumer in this file decrements
// nWordsUsed as bytes are consumed (and increments it only when rewinding),
// so the increment overstated the remaining data and defeated overrun
// detection.  Changed to a decrement.
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height)
{
PIXEL *rowptr = wavelet->band[band_index];
int pitch = wavelet->pitch;
int row;
int dequant = wavelet->quantization[band_index];
// Convert the pitch from bytes to pixels
pitch /= sizeof(PIXEL);
//BAND_ENCODING_16BIT
if (dequant == 1)
{
// No dequantization required: byte swap straight into the band
// (mild speedup versus calling GetWord16s per coefficient)
for (row = 0; row < height; row++)
{
int column;
char *sptr = (char *)stream->lpCurrentWord;
char *dptr = (char *)rowptr;
for (column = 0; column < width; column++)
{
// Swap the big endian bytes into the native 16-bit pixel
*(dptr+1) = *sptr++;
*dptr = *sptr++;
dptr += 2;
}
// Advance the bitstream past this row of coefficients and
// reduce the count of remaining bytes accordingly
stream->lpCurrentWord += width*2;
stream->nWordsUsed -= width*2;
rowptr += pitch;
}
}
else
{
// Read each coefficient through the bitstream and dequantize it
for (row = 0; row < height; row++)
{
int column;
for (column = 0; column < width; column++)
{
int value = GetWord16s(stream);
rowptr[column] = value*dequant;
}
rowptr += pitch;
}
}
return true;
}
// Decode a losslessly encoded 16-bit band
//
// The band is decoded with a two-pass finite state machine (the coefficients
// were split into high and low bytes by the encoder, so the FSM itself runs
// without dequantization) and the quantization factor is then applied in
// place over the whole band.  The active codebook and difference coding
// flags in the codec state are consumed and reset for the next subband.
// Returns false (and sets decoder->error) on any validation or decode failure.
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height)
{
//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
int result = true;
int quant = wavelet->quantization[band_index];
// Get the pointer to the finite state machine
FSM *fsm = &decoder->fsm[decoder->codec.active_codebook];
int size;
PIXEL *rowptr;
//int row = 0;
int pitch;
//CODEC_STATE *codec = &decoder->codec;
//int channel = codec->channel;
//int subband = codec->band.subband;
//int num_subbands = codec->num_subbands;
//int pixel_type = wavelet->pixel_type[band_index];
//int difference_coding = decoder->codec.difference_coding;
//int localquant = 1;
//int threading = 0;
decoder->codec.active_codebook = 0; // reset CODEC state
decoder->codec.difference_coding = 0; //reset state for next subband
// Must have a valid wavelet
assert(wavelet != NULL);
if (! (wavelet != NULL)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
//Must have a valid FSM
assert(fsm != NULL);
if (! (fsm != NULL)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// All rows are treated as one int32_t row that covers the entire band
size = fsm->table.num_states;
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check if the band is intended for 8-bit pixels
assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);
rowptr = (PIXEL *)wavelet->band[band_index];
pitch = wavelet->pitch;
assert(rowptr != NULL && pitch != 0);
if (! (rowptr != NULL && pitch != 0)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
DeQuantFSM(fsm, 1); // can't use this to dequant as we split the coefficients into high and low bytes.
if (!DecodeBandFSM16sNoGap2Pass(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, quant)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Apply the quantization in place (pitch/2 converts bytes to PIXEL units)
if(quant)
{
int x,y;
PIXEL *line = rowptr;
if(quant == 32)
{
// Common case: multiply by 32 via a shift
for(y=0;y<height;y++)
{
for(x=0;x<width;x++)
{
line[x] <<= 5;
}
line += pitch/2;
}
}
else
{
for(y=0;y<height;y++)
{
for(x=0;x<width;x++)
{
line[x] *= quant;
}
line += pitch/2;
}
}
}
/* if(once <= 60)
{
char name[200];
FILE *fp;
sprintf(name,"C:/Cedoc/DUMP/Decoder/dump%02d.raw", once);
fp = fopen(name,"wb");
fwrite(rowptr,width*height,1,fp);
fclose(fp);
once++;
}*/
assert(result == true);
if (! (result == true)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
return true;
}
// Invert the wavelet to reconstruct the lower wavelet in the transform
void ReconstructWaveletBand(DECODER *decoder, TRANSFORM *transform, int channel,
IMAGE *wavelet, int index, int precision,
const SCRATCH *scratch, int allocations_only)
{
int transform_type = transform->type;
int width = wavelet->width;
int height = wavelet->height;
int level = wavelet->level;
PIXEL *buffer = (PIXEL *)scratch->free_ptr;
size_t buffer_size = scratch->free_size;
// Is the current wavelet a spatial wavelet?
if (transform_type == TRANSFORM_TYPE_SPATIAL && index > 0)
{
// Reconstruct the lowpass band in the lower wavelet
int lowpass_index = index - 1;
IMAGE *lowpass = transform->wavelet[lowpass_index];
int lowpass_width = 2 * width;
int lowpass_height = 2 * height;
int lowpass_level = level - 1;
int lowpass_type = (lowpass_index == 0) ? WAVELET_TYPE_FRAME : WAVELET_TYPE_SPATIAL;
//const int prescale = 1;
const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
int prescale = transform->prescale[index];
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
lowpass_width, lowpass_height,
lowpass_level, lowpass_type);
#else
// Allocate the wavelet if not already allocated
#if _ALLOCATOR
lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
transform->wavelet[lowpass_index] = lowpass;
#endif
// Check that the lowpass band has not already been reconstructed
//assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);
if(!allocations_only)
{
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(wavelet));
// Has this wavelet already been reconstructed?
if ((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0)
{
// Perform the inverse spatial transform before decoding the next wavelet
STOP(tk_decoding);
START(tk_inverse);
//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
STOP(tk_inverse);
START(tk_decoding);
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, lowpass, 0);
#if TIMING
// Increment the count of spatial transforms performed during decoding
spatial_decoding_count++;
#endif
}
}
}
// Is the current wavelet a spatial wavelet above the temporal lowpass band?
else if (index > 3)
{
// Reconstruct the lowpass band in the lower wavelet
const int temporal_wavelet_index = 2;
int lowpass_index = (index > 4) ? index - 1 : index - 2;
IMAGE *lowpass = transform->wavelet[lowpass_index];
int lowpass_width = 2 * width;
int lowpass_height = 2 * height;
int lowpass_level = level - 1;
int lowpass_type = ((lowpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
//const int prescale = 2;
const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
int prescale = transform->prescale[index];
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
lowpass_width, lowpass_height,
lowpass_level, lowpass_type);
#else
// Allocate the wavelet if not already allocated
#if _ALLOCATOR
lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
transform->wavelet[lowpass_index] = lowpass;
#endif
if(!allocations_only)
{
// Check that the lowpass band has not already been reconstructed
assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(wavelet));
// Perform the inverse spatial transform before decoding the next wavelet
STOP(tk_decoding);
START(tk_inverse);
//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
STOP(tk_inverse);
START(tk_decoding);
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, lowpass, 0);
#if TIMING
// Increment the count of spatial transforms performed during decoding
spatial_decoding_count++;
#endif
}
}
// Is the current wavelet the spatial wavelet above the temporal highpass band?
else if (index == 3)
{
// Reconstruct the highpass band in the temporal wavelet
const int temporal_wavelet_index = 2;
int highpass_index = index - 1;
IMAGE *highpass = transform->wavelet[highpass_index];
int highpass_width = 2 * width;
int highpass_height = 2 * height;
int highpass_level = level - 1;
int highpass_type = ((highpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
int prescale = inverse_prescale ? transform->prescale[index] : 0;
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
highpass = GetWaveletThreadSafe(decoder, transform, highpass_index,
highpass_width, highpass_height,
highpass_level, highpass_type);
#else
// Allocate the wavelet if not already allocated
#if _ALLOCATOR
highpass = ReallocWaveletEx(decoder->allocator, highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#else
highpass = ReallocWaveletEx(highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#endif
transform->wavelet[highpass_index] = highpass;
#endif
if(!allocations_only)
{
// Check that the highpass band has not already been reconstructed
assert((highpass->band_valid_flags & BAND_VALID_MASK(1)) == 0);
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(wavelet));
// Perform the inverse spatial transform before decoding the next wavelet
STOP(tk_decoding);
START(tk_inverse);
TransformInverseSpatialQuantHighpass(wavelet, highpass, buffer, buffer_size, prescale);
STOP(tk_inverse);
START(tk_decoding);
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, highpass, 1);
#if TIMING
// Increment the count of spatial transforms performed during decoding
spatial_decoding_count++;
#endif
}
}
// Is the current wavelet the temporal wavelet?
else if (index == 2)
{
// Get the temporal wavelet
IMAGE *temporal = wavelet;
// Set the frame wavelet parameters
int frame_level = 1;
int frame_type = WAVELET_TYPE_FRAME;
// Get the two frame wavelets
IMAGE *frame[2];
frame[0] = transform->wavelet[0];
frame[1] = transform->wavelet[1];
// Check that the temporal wavelet is valid
assert(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL);
#if _THREADED_DECODER
// Allocate (or reallocate) the frame wavelets with thread safety
frame[0] = GetWaveletThreadSafe(decoder, transform, 0, width, height, frame_level, frame_type);
frame[1] = GetWaveletThreadSafe(decoder, transform, 1, width, height, frame_level, frame_type);
#else
// Allocate the frame wavelets if not already allocated
#if _ALLOCATOR
frame[0] = ReallocWaveletEx(decoder->allocator, frame[0], width, height, frame_level, frame_type);
frame[1] = ReallocWaveletEx(decoder->allocator, frame[1], width, height, frame_level, frame_type);
#else
frame[0] = ReallocWaveletEx(frame[0], width, height, frame_level, frame_type);
frame[1] = ReallocWaveletEx(frame[1], width, height, frame_level, frame_type);
#endif
transform->wavelet[0] = frame[0];
transform->wavelet[1] = frame[1];
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Before inverse temporal transform");
DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
}
#endif
if(!allocations_only)
{
// Check that the lowpass bands have not already been reconstructed
assert((frame[0]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
assert((frame[1]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(temporal));
// Invert the temporal transform between the frame wavelets
STOP(tk_decoding);
START(tk_inverse);
TransformInverseTemporalQuant(temporal, frame[0], frame[1], buffer, buffer_size, precision);
STOP(tk_inverse);
START(tk_decoding);
#if (0 && DEBUG)
if (logfile) {
IMAGE *wavelet = quad[0];
fprintf(logfile, "After inverse temporal transform\n");
DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
DumpArray16s("First frame wavelet, band 0", wavelet->band[0], wavelet->width, wavelet->height, wavelet->pitch, logfile);
}
#endif
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, frame[0], 0);
UpdateWaveletBandValidFlags(decoder, frame[1], 0);
#if TIMING
// Increment the number of temporal transforms performed outside of decoding
temporal_decoding_count++;
#endif
}
}
}
// Compute the dimensions of the output buffer
//
// Selects the wavelet that corresponds to the requested decoding resolution
// and returns its (scaled) dimensions through the output pointers.  The
// outputs are cleared to zero first so that callers can detect early
// termination (bad arguments or a missing wavelet).
void ComputeOutputDimensions(DECODER *decoder, int frame,
                             int *decoded_width_out, int *decoded_height_out)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	FRAME_INFO *info = &decoder->frame;
	//int progressive = codec->progressive;
	TRANSFORM **transform_array = decoder->transform;
	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
	IMAGE *wavelet = NULL;
	int wavelet_width;
	int wavelet_height;
	int decoded_width;
	int decoded_height;
	int resolution = info->resolution;
	//int chroma_offset = decoder->codec.chroma_offset;
	int decoded_scale = 0;

	// Cannot return results without valid output pointers
	if (decoded_width_out == NULL || decoded_height_out == NULL) {
		return;
	}

	// Clear the return values in case this routine terminates early
	*decoded_width_out = 0;
	*decoded_height_out = 0;

	// Get the decoding scale and the wavelet that holds the decoded bands
	switch (resolution)
	{
	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
		assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
		decoded_scale = 2;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_HALF:
#if DEBUG
		assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_QUARTER:
		if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
		{
#if DEBUG
			assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
			decoded_scale = 1;
			wavelet = transform_array[0]->wavelet[0];
		}
		else
		{
			decoded_scale = 1;
			wavelet = transform_array[0]->wavelet[3];
		}
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[5];
		if (wavelet == NULL)	// wavelet 5 is missing for intra-frame compressed samples
			wavelet = transform_array[0]->wavelet[2];
		break;

	default:
		assert(0);
		break;
	}

	// Get the decoded frame dimensions
	assert(wavelet != NULL);
	if (wavelet == NULL) {
		// Unknown resolution or missing wavelet: in release builds the assert
		// above compiles away, so guard against a NULL dereference and leave
		// the outputs cleared to signal failure to the caller.
		return;
	}
	wavelet_width = wavelet->width;
	wavelet_height = wavelet->height;

	// Half horizontal resolution only halves the width (height is still doubled below)
	if (resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		decoded_width = wavelet_width;
	else
		decoded_width = decoded_scale * wavelet_width;
	decoded_height = decoded_scale * wavelet_height;

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width);
	}
#endif

	// Return the decoded width and height
	*decoded_width_out = decoded_width;
	*decoded_height_out = decoded_height;
}
#define DEBUG_ROW16U 0
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
FRAME_INFO local_info;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
FRAME_INFO *info = &local_info;
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
int progressive = codec->progressive;
TRANSFORM **transform_array = decoder->transform;
IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
IMAGE *wavelet;
int wavelet_width;
int wavelet_height;
int decoded_width;
int decoded_height;
int resolution = decoder->frame.resolution;
int chroma_offset = decoder->codec.chroma_offset;
int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
//TODO: Change this routine to return the codec error code
CODEC_ERROR error = CODEC_ERROR_OKAY;
//if(decoder->cfhddata.calibration)
// LoadTweak();
//TODO: Change this routine to return an error code
if (decoder == NULL) {
return;
}
decoder->gop_frame_num = frame;
#if _THREADED_DECODER
// Wait until the transform thread has finished all pending transforms
WaitForTransformThread(decoder);
#endif
//return;
// copy frame info in a changable local structure
memcpy(info, &decoder->frame, sizeof(FRAME_INFO));
// Use the old code for reconstructing the frame
#if (0 && DEBUG)
// Force quarter resolution decoding for debugging that feature
resolution = DECODED_RESOLUTION_QUARTER;
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Inverting last wavelet, frame: %d\n", frame);
}
#endif
// The decoder can decode a video sample without returning a frame
if (output == NULL || pitch == 0) return;
#if (1 && DEBUG_ROW16U)
// Force decoding to 16-bit pixels for debugging
info->format = DECODED_FORMAT_YR16;
#endif
#if 0
if (info->format == DECODED_FORMAT_YR16)
{
// Force interlaced or progressive decoding for debugging
//progressive = false;
progressive = true;
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoder flags: 0x%p\n", decoder->flags);
}
#endif
// Does this frame have to be reconstructed?
if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoder discarding frame: %d\n", frame);
}
#endif
return;
}
// Check that the requested frame is within the limits of the group of frames
assert(0 <= frame && frame < decoder->gop_length);
// Check that the frame resolution is valid
assert(IsValidFrameResolution(resolution));
if (!IsValidFrameResolution(resolution)) {
decoder->error = CODEC_ERROR_RESOLUTION;
return;
}
#if (0 && TIMING) //(0 && DEBUG)
// Override progressive flag read from the bitstream for debugging
//progressive = 0; // Use the inverse frame transform
progressive = 1; // Use the inverse spatial transform
#endif
// Build the 3D LUTs if needed
ComputeCube(decoder);
//HACK DAN20110131 -- some formats will not directly decode so need to use the AM route
{
if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
resolution == DECODED_RESOLUTION_HALF)
{
if( decoder->frame.format == COLOR_FORMAT_R408 ||
decoder->frame.format == COLOR_FORMAT_V408)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
if( decoder->frame.format == COLOR_FORMAT_NV12)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true; // TODO, make it work with this.
}
if (decoder->codec.progressive == false && decoder->frame.format == COLOR_FORMAT_RGB24)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
// Get the decoding scale
if(!uncompressed)
{
switch(resolution)
{
case DECODED_RESOLUTION_FULL:
case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = 2 * wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_HALF:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_QUARTER:
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
}
else
{
wavelet = transform_array[0]->wavelet[3];
}
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_LOWPASS_ONLY:
wavelet = transform_array[0]->wavelet[5];
if(wavelet == NULL) // wavelet 5 is missing for intra-frame compressed samples
wavelet = transform_array[0]->wavelet[2];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
default:
assert(0);
break;
}
}
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
decoded_width = info->width/2;
decoded_height = info->height/2;
}
else
{
decoded_width = info->width;
decoded_height = info->height;
}
}
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
if(resolution == DECODED_RESOLUTION_FULL)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL_DEBAYER;
}
}
else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
}
}
else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
if(decoded_width*2 == info->width)
{
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
}
else if(decoder->frame.format == DECODED_FORMAT_BYR2 || decoder->frame.format == DECODED_FORMAT_BYR4)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_NODEBAYER;
}
}
else
{
if(resolution == DECODED_RESOLUTION_HALF)
{
if(decoded_width*2 == info->width)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL;
}
}
else if(resolution == DECODED_RESOLUTION_QUARTER)
{
if(uncompressed)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED;
}
else
{
if(decoded_width == info->width)
{
info->resolution = resolution = DECODED_RESOLUTION_HALF;
}
}
}
}
}
if(uncompressed)
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = UncompressedSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 (always v210)
error = UncompressedSampleFrameYUVToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_RGB_444: // Original encoding scheme for RGB 444 (always DPX0)
error = UncompressedSampleFrameRGBToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
else
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_RGB_444: // channels = decoder->codec.num_channels; planes of RGB 4:4:4
case ENCODED_FORMAT_RGBA_4444: // Four planes of ARGB 4:4:4:4
error = ReconstructSampleFrameRGB444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
//error = ReconstructSampleFrameYUVA4444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = ReconstructSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2
// Add new code here for the final steps in decoding the original YUV 4:2:2 format
error = ReconstructSampleFrameYUV422ToBuffer(decoder, frame, output, pitch);
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
// Was the newer code able to successfully reconstruct the frame?
if (error != CODEC_ERROR_UNSUPPORTED_FORMAT)
{
// Save the codec error code in the decoder state and return
decoder->error = error;
return;
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width);
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n",
decoded_width, decoded_height, info->width, info->height, pitch);
}
#endif
#if (0 && DEBUG)
if (logfile) {
IMAGE *wavelet = transform[0]->wavelet[frame];
int band = 0;
fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
}
#endif
// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
//if (! (info->width >= decoded_width))
{
if (logfile) {
//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
}
}
#endif
assert(info->width >= decoded_width);
assert((info->height+7)/8 >= (decoded_height+7)/8);
if (!(info->width >= decoded_width && (info->height+7)/8 >= (decoded_height+7)/8)) {
decoder->error = CODEC_ERROR_FRAMESIZE;
return;
}
#if (0 && DEBUG)
if (logfile) {
//SUBIMAGE subimage = SUBIMAGE_UPPER_LEFT(16, 16);
SUBIMAGE subimage = SUBIMAGE_UPPER_RIGHT(16, 16);
// Adjust the subimage to be at the middle of the right border
//subimage.row += wavelet_height/2 - 8;
DumpBand("SIF Image", wavelet, 0, &subimage, logfile);
}
#endif
START(tk_inverse);
if (resolution == DECODED_RESOLUTION_QUARTER)
{
int precision = codec->precision;
// Reconstruct the frame to quarter resolution
ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
info, &decoder->scratch, precision);
}
else
// Was the first transform a frame transform (used for interlaced frames)?
if (!progressive)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY))
{
// Apply the inverse frame transform and pack the results into the output buffer
int precision = codec->precision;
#if (0 && DEBUG)
DumpWaveletBandsPGM(wavelet, frame, num_channels);
#endif
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToYUV(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToYUV(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
int precision = codec->precision;
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
int scale = 13;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
if(lowpass_images[channel] == NULL) // missing for intra-frame compressed samples
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
int precision = codec->precision;
TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
}
}
}
else // The first transform was a spatial transform (used for progressive frames)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY) && // Output YUV
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
uint8_t *pixoutput = output;
if(decoder->use_active_metadata_decoder) //WIP
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToBayerYUV);
}
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2YUV);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToYUV);
}
#else
//TODO : Accelerated BAYER for single thread decoding.
assert(0);
// Transform the wavelets for each channel to the output image (not threaded)
//TransformInverseSpatialToYUV(decoder, transform_array, frame, num_channels, output, pitch, info,
// &decoder->scratch, chroma_offset, precision);
#endif
}
else if ((resolution == DECODED_RESOLUTION_FULL) && decoder->codec.encoded_format == ENCODED_FORMAT_BAYER &&
(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) && // Output RGB
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2 && decoder->use_active_metadata_decoder)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
{
uint8_t *pixoutput = output;
if(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
{
pixoutput += (info->height-1)*pitch;
pitch = -pitch;
}
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
int precision = codec->precision;
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
//int precision = codec->precision;
int scale = 13;
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
if(lowpass_images[channel] == NULL) // missing for intra-frame compressed samples
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF || resolution == DECODED_RESOLUTION_HALF_NODEBAYER)// || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
char *format = decoded_format_string[info->format];
sprintf(label, "Output, channel: %d, format: %s", channel, format);
DumpImageStatistics(label, lowpass_images[channel], logfile);
}
#endif
}
STOP(tk_inverse);
#if 1 //|| BAYER_SUPPORT
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
//unsigned short scanline[4096*3],*sptr;
//unsigned short scanline2[4096*3],*sptr2;
unsigned short *scanline,*sptr;
unsigned short *scanline2,*sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
IMAGE *gd_image = lowpass_images[3];
uint8_t *outyuv,*line = output;
PIXEL *bayer_line, *bayerptr;
PIXEL *G,*RG,*BG,*GD;
int x,y;
int bayer_pitch = info->width*4;
int format = info->format;
bool inverted = false;
int maxbound = 4095; //10-bit source
int midpoint = 32768>>3;
int shift = 4;
if(precision == 12)
{
maxbound = 16383;
midpoint = 32768>>1;
shift = 2;
}
if(buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32)
{
inverted = true;
line += (info->height-1)*pitch;
pitch = -pitch;
}
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
for(y=0; y<info->height; y++)
{
uint8_t *newline = line;
PIXEL *newG=G,*newRG=RG,*newBG=BG;
PIXEL *gptr,*rgptr,*bgptr,*gdptr;
int r,g,b,rg,bg,y1,y2,u,v;
int r1,g1,b1;
int i;
newline += pitch*y;
newG += y * (g_image->pitch / sizeof(PIXEL));
newRG += y * (rg_image->pitch / sizeof(PIXEL));
newBG += y * (bg_image->pitch / sizeof(PIXEL));
gptr = newG;
rgptr = newRG;
bgptr = newBG;
sptr = scanline;
for(x=0; x<info->width; x++)
{
g = (*gptr++);
if(g > maxbound) g = maxbound;
rg = (*rgptr++);
bg = (*bgptr++);
r = (rg<<1) - midpoint + g;
b = (bg<<1) - midpoint + g;
if(r > maxbound) r = maxbound;
if(b > maxbound) b = maxbound;
if(r < 0) r = 0;
if(g < 0) g = 0;
if(b < 0) b = 0;
*sptr++ = r<<shift;
*sptr++ = g<<shift;
*sptr++ = b<<shift;
}
{
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if(decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr,
newline, y, pitch,
info->format, whitebitdepth, flags);
}
}
#endif
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
uint8_t *line = output;
unsigned char *rgb8;
PIXEL *G,*RG,*BG;
int x,y;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
if(info->format == DECODED_FORMAT_RGB32)
{
line = output;
line += (info->height-1) * pitch;
for(y=0; y<info->height; y++)
{
PIXEL *gptr,*rgptr,*bgptr;
int r,g,b;
int i,noisearray[32];
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for(x=0; x<info->width; x++)
{
int rnd = noisearray[x&31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if(r < 0) r=0; if(r > 255) r=255;
if(g < 0) g=0; if(g > 255) g=255;
if(b < 0) b=0; if(b > 255) b=255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
*rgb8++ = 255;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
else if(info->format == DECODED_FORMAT_RGB24)
{
line = output;
line += (info->height-1) * pitch;
for(y=0; y<info->height; y++)
{
PIXEL *gptr,*rgptr,*bgptr;
int r,g,b;
int i,noisearray[32];
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for(x=0; x<info->width; x++)
{
int rnd = noisearray[x&31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if(r < 0) r=0; if(r > 255) r=255;
if(g < 0) g=0; if(g > 255) g=255;
if(b < 0) b=0; if(b > 255) b=255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
}
else
#endif
{
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
}
START(tk_inverse);
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
int width = info->width;
int height = info->height;
sprintf(label, "Output");
DumpBufferStatistics(label, output, width, height, pitch, logfile);
}
#endif
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
// Handle inversion of the output image in this routine
FRAME_INFO info2;
int format;
bool inverted = false;
int precision = codec->precision;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
if(resolution == DECODED_RESOLUTION_FULL_DEBAYER)
height *= 2;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
//#if BUILD_PROSPECT
// Output the frame in V210 format?
if( (format == DECODED_FORMAT_V210 ||
format == DECODED_FORMAT_YU64) &&
decoder->codec.encoded_format != ENCODED_FORMAT_BAYER )
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// The output buffer is an array of 10-bit pixels packed into double words
#if 0
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2,
buffer, buffer_size, chroma_offset, decoder->codec.precision);
#else
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
else
//#endif
// Decoding a full resolution progressive frame to a Bayer output format?
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// PIXEL16U *RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16);
if(decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*decoded_height*4*sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width*decoded_height*4*sizeof(PIXEL);
}
//TODO: Replace this memory allocation with a scratch buffer allocation
//#ifdef SHARPENING
if(decoder->RGBFilterBuffer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*decoded_height*4*3*sizeof(PIXEL);
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*3*sizeof(PIXEL), 16);
#endif
decoder->RGBFilterBufferSize = info->width*decoded_height*4*3*sizeof(PIXEL);
}
//#endif
if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
if(decoder->RawBayer16)
{
uint8_t *line;
PIXEL16U *bayer_line, *bayerptr, *outA16, *outB16;
PIXEL16U *G,*RG,*BG,*GD;
int x,y;
int bayer_pitch = info->width*4;
//float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
#if 0
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f/256.0f},
{-0.101f,-0.338f, 0.439f, 0.5f},
{0.439f,-0.399f,-0.040f, 0.5f}
};
float mtrx[3][4] =
{
{1.0f, 0, 0, 0},
{0, 1.0f, 0, 0},
{0, 0, 1.0f, 0}
};
float whitebalance[3] = { 1.0f, 1.0f, 1.0f };
#endif
#if 0 // Matrix disabled as it can only be correct handled by the 3D LUT due to the required linear conversions
/* if(decoder->cfhddata.MagicNumber == CFHDDATA_MAGIC_NUMBER && decoder->cfhddata.version >= 2)
{
float fval = 0.0;
int i;
for(i=0; i<12; i++)
{
mtrx[i>>2][i&3] = fval = decoder->cfhddata.colormatrix[i>>2][i&3];
if((i>>2) == (i&3))
{
if(fval != 1.0)
{
matrix_non_unity = 1;
}
}
else
{
if(fval != 0.0)
{
matrix_non_unity = 1;
}
}
}
// not active as VFW doesn't yet support the 3D LUTs
if(decoder->cfhddata.version >= 5)
{
int j;
float encode_curvebase = 90.0;
float decode_curvebase = 90.0;
int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
int decode_curve_type = decoder->cfhddata.decode_curve >> 16;
if(decoder->cfhddata.user_white_balance[0] > 0.0)
{
wb_non_unity = 1;
whitebalance[0] = decoder->cfhddata.user_white_balance[0];
whitebalance[1] = (decoder->cfhddata.user_white_balance[1]+decoder->cfhddata.user_white_balance[2])/2.0;
whitebalance[2] = decoder->cfhddata.user_white_balance[3];
}
if(encode_curve_type) //1 or 2
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
else
{
encode_curve_type = 1;
encode_curvebase = 90.0;
}
if(decode_curve_type) //1 or 2
decode_curvebase = (float)((decoder->cfhddata.decode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.decode_curve & 0xff);
else
{
decode_curve_type = 1;
decode_curvebase = 90.0;
}
for(j=0; j<2048; j++)
{
if(encode_curve_type == 1)
curve2lin[j] = CURVE_LOG2LIN((float)j/2047.0,encode_curvebase);
else
curve2lin[j] = CURVE_GAM2LIN((float)j/2047.0,encode_curvebase);
}
for(j=-512; j<=2048; j++) // -1 to +4
{
if(encode_curve_type == CURVE_TYPE_LOG)
lin2curve[j+512] = CURVE_LIN2LOG((float)j/512.0,encode_curvebase);
else
lin2curve[j+512] = CURVE_LIN2GAM((float)j/512.0,encode_curvebase);
}
}
}*/
#endif
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL),
info, chroma_offset, precision);
#else
// Decode that last transform to rows of Bayer data (one row per channel)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info,
&decoder->scratch, chroma_offset, precision);
#endif
if(resolution == DECODED_RESOLUTION_FULL_DEBAYER &&
(info->format < DECODED_FORMAT_BYR1 || info->format > DECODED_FORMAT_BYR4))
{
#if _THREADED //DemosaicRAW
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* int bayer_format = decoder->cfhddata.bayer_format;
unsigned char *outA8, *outB8;
unsigned short *lineStartA16, *lineStartB16;
unsigned short *lineA16, *lineB16;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height+DEMOSAIC_DELAYLINES; y++)
{
bayer_line = decoder->RawBayer16;
bayer_line += bayer_pitch * y;
if(y<info->height)
{
ColorDifference2Bayer(info->width,
bayer_line, bayer_pitch, bayer_format);
}
if(y>=3+DEMOSAIC_DELAYLINES && y<info->height-3+DEMOSAIC_DELAYLINES) //middle scanline
{
unsigned short *delayptr = decoder->RawBayer16;
delayptr += bayer_pitch * (y-DEMOSAIC_DELAYLINES);
BayerRippleFilter(info->width,
delayptr, bayer_pitch, bayer_format, decoder->RawBayer16);
}
if(y>=DEMOSAIC_DELAYLINES)
{
int delay_y = y - DEMOSAIC_DELAYLINES;
unsigned short *sptr, scanline[8192*3];
outA8 = line;
line += pitch;
outB8 = line;
line += pitch;
sptr = scanline;
DebayerLine(info->width*2, info->height*2, delay_y*2,
decoder->RawBayer16, bayer_format, sptr, sharpening);
for(x=0; x<info->width*2; x++)
{
outA8[2] = *sptr++>>8;
outA8[1] = *sptr++>>8;
outA8[0] = *sptr++>>8;
outA8+=3;
}
for(x=0; x<info->width*2; x++)
{
outB8[2] = *sptr++>>8;
outB8[1] = *sptr++>>8;
outB8[0] = *sptr++>>8;
outB8+=3;
}
}
}*/
#endif // _THREADED
}
else
if(format == DECODED_FORMAT_BYR2 || format == DECODED_FORMAT_BYR4)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* {
int bayer_format = decoder->cfhddata.bayer_format;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
for(x=0; x<info->width; x++)
{
int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
if(r < 0) r = 0;
if(g1 < 0) g1 = 0;
if(g2 < 0) g2 = 0;
if(b < 0) b = 0;
if(r > 0xffff) r = 0xffff;
if(g1 > 0xffff) g1 = 0xffff;
if(g2 > 0xffff) g2 = 0xffff;
if(b > 0xffff) b = 0xffff;
switch(bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
}
}
bayer_line += bayer_pitch;
}
if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
int bayer_format = decoder->cfhddata.bayer_format;
for(y=2; y<info->height-3; y++)
{
int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
int offset = pitch>>1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for(x=2; x<info->width-2; x++)
{
int mn,mx,g;
int range = 8*256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset+1];
if(mn > outA16[offset-1]) mn = outA16[offset-1];
if(mx < outA16[offset-1]) mx = outA16[offset-1];
if((outA16[-offset-1] & 1)==0)
{
if(mn > outA16[-offset-1]) mn = outA16[-offset-1];
if(mx < outA16[-offset-1]) mx = outA16[-offset-1];
}
if((outA16[-offset+1] & 1)==0)
{
if(mn > outA16[-offset+1]) mn = outA16[-offset+1];
if(mx < outA16[-offset+1]) mx = outA16[-offset+1];
}
delta = mx - mn;
if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx)))
{
int gmn,gmx;
gmn = gmx = g;
if((outA16[-2*offset-2] & 1)==0)
{
if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2];
if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2];
}
if((outA16[-2*offset] & 1)==0)
{
if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset];
if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset];
}
if((outA16[-2*offset+2] & 1)==0)
{
if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2];
if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2];
}
if((outA16[-2] & 1)==0)
{
if(gmn > outA16[-2]) gmn = outA16[-2];
if(gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2];
if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2];
if(gmn > outA16[2*offset]) gmn = outA16[2*offset];
if(gmx < outA16[2*offset]) gmx = outA16[2*offset];
if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2];
if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2];
if(gmn > outA16[2]) gmn = outA16[2];
if(gmx < outA16[2]) gmx = outA16[2];
if((gmx - gmn) < range)
{
alpha = range;//delta;
if(g > mx)
{
alpha *= (g-mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn-g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
if( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift;
if(val > 0xffff) val = 0xffff;
if(val < 0) val = 0;
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
outA16++; //g
outA16++; //b
}
}
}
}
}*/
#endif
}
// Pack the rows of Bayer data (full resolution progressive) into BYR3 format?
else if (format == DECODED_FORMAT_BYR3)
{
PIXEL16U *outR, *outG1, *outG2, *outB;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
// #pragma omp parallel for
for(y=0; y<info->height; y++)
{
uint8_t *line = output;
PIXEL *bayerptr = (PIXEL *)decoder->RawBayer16;
line += pitch*2*y;
bayerptr += bayer_pitch * y;
outR = (PIXEL16U *)line;
outG1 = outR + (pitch/4);
outG2 = outR + (pitch/4)*2;
outB = outR + (pitch/4)*3;
G = (PIXEL16U *)bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
// Pack the rows of Bayer components into the BYR3 pattern
#if (1 && XMMOPT)
{
__m128i *G_128 = (__m128i *)G;
__m128i *RG_128 = (__m128i *)RG;
__m128i *BG_128 = (__m128i *)BG;
__m128i *GD_128 = (__m128i *)GD;
__m128i *outR_128 = (__m128i *)outR;
__m128i *outG1_128 = (__m128i *)outG1;
__m128i *outG2_128 = (__m128i *)outG2;
__m128i *outB_128 = (__m128i *)outB;
__m128i limiter = _mm_set1_epi16(0x7fff - 0x3ff);
__m128i midpoint1 = _mm_set1_epi16(32768>>6);
__m128i midpoint2 = _mm_set1_epi16(32768>>5);
int column_step = 8;
int post_column = (info->width) - ((info->width) % column_step);
for (x=0; x < post_column; x += column_step)
{
__m128i r_128;
__m128i g1_128;
__m128i g2_128;
__m128i b_128;
__m128i g_128;
__m128i rg_128;
__m128i bg_128;
__m128i gd_128;
g_128 = _mm_load_si128(G_128++);
rg_128 = _mm_load_si128(RG_128++);
bg_128 = _mm_load_si128(BG_128++);
gd_128 = _mm_load_si128(GD_128++);
g_128 = _mm_srli_epi16(g_128, 6);
rg_128 = _mm_srli_epi16(rg_128, 5);
bg_128 = _mm_srli_epi16(bg_128, 5);
gd_128 = _mm_srli_epi16(gd_128, 6);
gd_128 = _mm_subs_epi16(gd_128, midpoint1);
rg_128 = _mm_subs_epi16(rg_128, midpoint2);
bg_128 = _mm_subs_epi16(bg_128, midpoint2);
r_128 = _mm_adds_epi16(rg_128, g_128);
b_128 = _mm_adds_epi16(bg_128, g_128);
g1_128 = _mm_adds_epi16(g_128, gd_128);
g2_128 = _mm_subs_epi16(g_128, gd_128);
r_128 = _mm_adds_epi16(r_128, limiter);
r_128 = _mm_subs_epu16(r_128, limiter);
g1_128 = _mm_adds_epi16(g1_128, limiter);
g1_128 = _mm_subs_epu16(g1_128, limiter);
g2_128 = _mm_adds_epi16(g2_128, limiter);
g2_128 = _mm_subs_epu16(g2_128, limiter);
b_128 = _mm_adds_epi16(b_128, limiter);
b_128 = _mm_subs_epu16(b_128, limiter);
_mm_store_si128(outR_128++, r_128);
_mm_store_si128(outG1_128++, g1_128);
_mm_store_si128(outG2_128++, g2_128);
_mm_store_si128(outB_128++, b_128);
}
G = (PIXEL16U *)G_128;
RG = (PIXEL16U *)RG_128;
BG = (PIXEL16U *)BG_128;
GD = (PIXEL16U *)GD_128;
outR = (PIXEL16U *)outR_128;
outG1 = (PIXEL16U *)outG1_128;
outG2 = (PIXEL16U *)outG2_128;
outB = (PIXEL16U *)outB_128;
}
#endif
for(; x<info->width; x++)
{
int r,g,b,rg,bg,gd,g1,g2;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
if(r < 0) r = 0;
if(g1 < 0) g1 = 0;
if(g2 < 0) g2 = 0;
if(b < 0) b = 0;
if(r > 0xffff) r = 0xffff;
if(g1 > 0xffff) g1 = 0xffff;
if(g2 > 0xffff) g2 = 0xffff;
if(b > 0xffff) b = 0xffff;
//Red-grn phase
*outR++ = r>>6;
*outG1++ = g1>>6;
*outG2++ = g2>>6;
*outB++ = b>>6;
}
}
}
// Pack the rows of Bayer data (full resolution progressive) into BYR4 format?
else if (format == DECODED_FORMAT_BYR4)
{
int bayer_format = decoder->cfhddata.bayer_format;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
for(x=0; x<info->width; x++)
{
//int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
int32_t r, g, b, rg, bg, gd, g1, g2;
// The output of the inverse transform is unsigned 16-bit integers
const int midpoint = 32768;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - midpoint;
r = ((rg - midpoint)<<1) + g;
b = ((bg - midpoint)<<1) + g;
g1 = g + gd;
g2 = g - gd;
r = SATURATE_16U(r);
g1 = SATURATE_16U(g1);
g2 = SATURATE_16U(g2);
b = SATURATE_16U(b);
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
switch(bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
default:
// Unsupported Bayer format
assert(0);
*outA16++ = 0;
*outA16++ = 0;
*outB16++ = 0;
*outB16++ = 0;
break;
}
}
bayer_line += bayer_pitch;
}
if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
for(y=2; y<info->height-3; y++)
{
//int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
int offset = pitch>>1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for(x=2; x<info->width-2; x++)
{
int mn,mx,g;
int range = 8*256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset+1];
if(mn > outA16[offset-1]) mn = outA16[offset-1];
if(mx < outA16[offset-1]) mx = outA16[offset-1];
if((outA16[-offset-1] & 1)==0)
{
if(mn > outA16[-offset-1]) mn = outA16[-offset-1];
if(mx < outA16[-offset-1]) mx = outA16[-offset-1];
}
if((outA16[-offset+1] & 1)==0)
{
if(mn > outA16[-offset+1]) mn = outA16[-offset+1];
if(mx < outA16[-offset+1]) mx = outA16[-offset+1];
}
delta = mx - mn;
if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx)))
{
int gmn,gmx;
gmn = gmx = g;
if((outA16[-2*offset-2] & 1)==0)
{
if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2];
if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2];
}
if((outA16[-2*offset] & 1)==0)
{
if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset];
if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset];
}
if((outA16[-2*offset+2] & 1)==0)
{
if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2];
if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2];
}
if((outA16[-2] & 1)==0)
{
if(gmn > outA16[-2]) gmn = outA16[-2];
if(gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2];
if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2];
if(gmn > outA16[2*offset]) gmn = outA16[2*offset];
if(gmx < outA16[2*offset]) gmx = outA16[2*offset];
if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2];
if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2];
if(gmn > outA16[2]) gmn = outA16[2];
if(gmx < outA16[2]) gmx = outA16[2];
if((gmx - gmn) < range)
{
alpha = range;//delta;
if(g > mx)
{
alpha *= (g-mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn-g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
if( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift;
if(val > 0xffff) val = 0xffff;
if(val < 0) val = 0;
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
outA16++; //g
outA16++; //b
}
}
}
}
// Linear restore
{
unsigned short *buff = (unsigned short *)output;
//static int pos = 0;
for(y=0; y<info->height*2; y++)
{
for(x=0; x<info->width*2; x++)
{
float val = (float)buff[y*info->width*2 + x]/65535.0f;
float encode_curvebase = 90.0;
int encode_curve_type = CURVE_TYPE_LOG;
int encode_curve_neg;
if((decoder->cfhddata.encode_curve)>>16) //1 or 2
{
encode_curve_type = (decoder->cfhddata.encode_curve)>>16;
if(encode_curve_type & CURVE_TYPE_EXTENDED)
encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
else
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
}
if(encode_curvebase == 1.0 && encode_curve_type <= CURVE_TYPE_LINEAR)
encode_curve_type = CURVE_TYPE_LINEAR;
encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
switch(encode_curve_type & CURVE_TYPE_MASK)
{
case CURVE_TYPE_LOG:
val = CURVE_LOG2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_GAMMA:
val = CURVE_GAM2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_CINEON:
val = CURVE_CINEON2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_CINE985:
val = CURVE_CINE9852LIN(val,encode_curvebase);
break;
case CURVE_TYPE_PARA:
val = CURVE_PARA2LIN(val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff));
break;
case CURVE_TYPE_CSTYLE:
val = CURVE_CSTYLE2LIN((float)val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff));
break;
case CURVE_TYPE_SLOG:
val = CURVE_SLOG2LIN((float)val);
break;
case CURVE_TYPE_LOGC:
val = CURVE_LOGC2LIN((float)val);
break;
case CURVE_TYPE_LINEAR:
default:
break;
}
buff[y*info->width*2 + x] = (int)(val*4095.0);
}
}
}
}
else
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
//unsigned short scanline[8192*3],*sptr;
//unsigned short scanline2[8192*3],*sptr2;
unsigned short *scanline,*sptr;
unsigned short *scanline2,*sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
uint8_t *outyuv,*line = output;
PIXEL *bayerptr;
int x,y;
if(buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
int r,g,b,rg,bg,y1,y2,u,v;
int r1,g1,b1;
int i;
__m128i gggggggg,ggggggg2,rgrgrgrg,bgbgbgbg;
__m128i rrrrrrrr,bbbbbbbb;
__m128i mid8192 = _mm_set1_epi16(8192);
__m128i mid16384 = _mm_set1_epi16(16384);
__m128i mid32768 = _mm_set1_epi16(32768);
__m128i overflowprotectRGB_epi16 = _mm_set1_epi16(0x7fff-0x3fff);
int sse2width = info->width & 0xfff8;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
sptr = scanline;
x = 0;
for(; x<sse2width; x+=8)
{
gggggggg = _mm_loadu_si128((__m128i *)G); G+=8;
rgrgrgrg = _mm_loadu_si128((__m128i *)RG); RG+=8;
bgbgbgbg = _mm_loadu_si128((__m128i *)BG); BG+=8;
ggggggg2 = _mm_srli_epi16(gggggggg, 2);// 0-16383 14bit unsigned
rgrgrgrg = _mm_srli_epi16(rgrgrgrg, 2);// 14bit unsigned
bgbgbgbg = _mm_srli_epi16(bgbgbgbg, 2);// 14bit unsigned
rrrrrrrr = _mm_subs_epi16(rgrgrgrg, mid8192);// -8191 to 8191 14bit signed
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 1); // -16382 to 16382 15bit signed
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, ggggggg2); // -16382 to 32767
bbbbbbbb = _mm_subs_epi16(bgbgbgbg, mid8192);// -8191 to 8191 14bit signed
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 1); // -16382 to 16382 15bit signed
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, ggggggg2); // -16382 to 32767
//limit to 0 to 16383
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, overflowprotectRGB_epi16);
rrrrrrrr = _mm_subs_epu16(rrrrrrrr, overflowprotectRGB_epi16);
//limit to 0 to 16383
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, overflowprotectRGB_epi16);
bbbbbbbb = _mm_subs_epu16(bbbbbbbb, overflowprotectRGB_epi16);
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 2); // restore to 0 to 65535
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 2); // restore to 0 to 65535
*sptr++ = _mm_extract_epi16(rrrrrrrr, 0);
*sptr++ = _mm_extract_epi16(gggggggg, 0);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 0);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 1);
*sptr++ = _mm_extract_epi16(gggggggg, 1);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 1);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 2);
*sptr++ = _mm_extract_epi16(gggggggg, 2);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 2);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 3);
*sptr++ = _mm_extract_epi16(gggggggg, 3);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 3);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 4);
*sptr++ = _mm_extract_epi16(gggggggg, 4);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 4);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 5);
*sptr++ = _mm_extract_epi16(gggggggg, 5);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 5);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 6);
*sptr++ = _mm_extract_epi16(gggggggg, 6);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 6);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 7);
*sptr++ = _mm_extract_epi16(gggggggg, 7);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 7);
}
for(; x<info->width; x++)
{
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
if(r < 0) r = 0; if(r > 0xffff) r = 0xffff;
if(g < 0) g = 0; if(g > 0xffff) g = 0xffff;
if(b < 0) b = 0; if(b > 0xffff) b = 0xffff;
*sptr++ = r;
*sptr++ = g;
*sptr++ = b;
}
{
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if(decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr, line, pitch,
info->format, whitebitdepth, flags);
}
line += pitch;
bayer_line += bayer_pitch;
}
#endif
}
/* // switch to using the ApplyActiveMetaData() and ConvertLinesToOutput() calls - DAN20071201
// Pack the rows of Bayer data (full resolution progressive) into BYR2 format?
else if (format == DECODED_FORMAT_YUYV)
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale);
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
for(y=0; y<info->height; y++)
{
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 255) y1 = 255;
if(y2 < 0) y2 = 0;
if(y2 > 255) y2 = 255;
if(u < 0) u = 0;
if(u > 255) u = 255;
if(v < 0) v = 0;
if(v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
bayer_line = decoder->RawBayer16;
scale = 16384.0;
//_mm_empty(); // Clear the mmx register state
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale * 4.0);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale * 4.0);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale * 4.0);
scale = 4096.0;
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
y_offset += 26;
u_offset += 26;
v_offset += 26;
for(y=0; y<info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 65535) y1 = 65535;
if(y2 < 0) y2 = 0;
if(y2 > 65535) y2 = 65535;
if(u < 0) u = 0;
if(u > 65535) u = 65535;
if(v < 0) v = 0;
if(v > 65535) v = 65535;
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else //RGBs
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
r_rmult = (mtrx[0][0]) * scale * whitebalance[0];
r_gmult = (mtrx[0][1]) * scale * whitebalance[1];
r_bmult = (mtrx[0][2]) * scale * whitebalance[2];
r_offset= (mtrx[0][3]) * scale;
g_rmult = (mtrx[1][0]) * scale * whitebalance[0];
g_gmult = (mtrx[1][1]) * scale * whitebalance[1];
g_bmult = (mtrx[1][2]) * scale * whitebalance[2];
g_offset= (mtrx[1][3]) * scale;
b_rmult = (mtrx[2][0]) * scale * whitebalance[0];
b_gmult = (mtrx[2][1]) * scale * whitebalance[1];
b_bmult = (mtrx[2][2]) * scale * whitebalance[2];
b_offset= (mtrx[2][3]) * scale;
for(y=0; y<info->height; y++)
{
int i,noisearray[32];
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = RG + bayer_pitch/4;
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 127);
}
if(info->format == DECODED_FORMAT_RGB32)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
// g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO : need on convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
//g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO: Need to convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
line += pitch;
bayer_line += bayer_pitch;
}
}
*/
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
int precision = codec->precision;
if(decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*info->height*num_channels*sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*info->height*num_channels*sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width*info->height*num_channels*sizeof(PIXEL);
}
//#ifdef SHARPENING
if(decoder->RGBFilterBuffer16 == NULL)
{
int frame_size = info->width*decoded_height*4*3*sizeof(PIXEL);
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
frame_size = info->width*decoded_height*4*4*sizeof(PIXEL);
#if _ALLOCATOR
{
ALLOCATOR *allocator = decoder->allocator;
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, frame_size, 16);
}
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
decoder->RGBFilterBufferSize = frame_size;
}
//#endif
if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
//TODO: Replace this memory allocation with a scratch buffer allocation
if(decoder->RawBayer16)
{
uint8_t *outyuv,*line, *source_line;
PIXEL16U *bayerptr;
PIXEL16U *G,*RG,*BG;
int x,y;
int src_pitch = info->width*num_channels*sizeof(PIXEL);
int y_rmult,y_gmult,y_bmult,y_offset;//shift=8;
int u_rmult,u_gmult,u_bmult,u_offset;
int v_rmult,v_gmult,v_bmult,v_offset;
float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f/256.0f},
{-0.101f,-0.338f, 0.439f, 0.5f},
{0.439f,-0.399f,-0.040f, 0.5}
};
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, src_pitch,
info, chroma_offset, precision);
#else
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, src_pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
if (format == DECODED_FORMAT_YUYV)
{
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 256.0;
y_rmult = (int)((rgb2yuv[0][0]));
y_gmult = (int)((rgb2yuv[0][1]));
y_bmult = (int)((rgb2yuv[0][2]));
y_offset= (int)((rgb2yuv[0][3]));
u_rmult = (int)((rgb2yuv[1][0]));
u_gmult = (int)((rgb2yuv[1][1]));
u_bmult = (int)((rgb2yuv[1][2]));
u_offset= (int)((rgb2yuv[1][3]));
v_rmult = (int)((rgb2yuv[2][0]));
v_gmult = (int)((rgb2yuv[2][1]));
v_bmult = (int)((rgb2yuv[2][2]));
v_offset= (int)((rgb2yuv[2][3]));
for(y=0; y<info->height; y++)
{
outyuv = line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 255) y1 = 255;
if(y2 < 0) y2 = 0;
if(y2 > 255) y2 = 255;
if(u < 0) u = 0;
if(u > 255) u = 255;
if(v < 0) v = 0;
if(v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
source_line += src_pitch;
}
}
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 16384.0;
y_rmult = (int)((rgb2yuv[0][0]) * scale);
y_gmult = (int)((rgb2yuv[0][1]) * scale);
y_bmult = (int)((rgb2yuv[0][2]) * scale);
y_offset= (int)((rgb2yuv[0][3]) * scale * 4.0f);
u_rmult = (int)((rgb2yuv[1][0]) * scale);
u_gmult = (int)((rgb2yuv[1][1]) * scale);
u_bmult = (int)((rgb2yuv[1][2]) * scale);
u_offset= (int)((rgb2yuv[1][3]) * scale * 4.0f);
v_rmult = (int)((rgb2yuv[2][0]) * scale);
v_gmult = (int)((rgb2yuv[2][1]) * scale);
v_bmult = (int)((rgb2yuv[2][2]) * scale);
v_offset= (int)((rgb2yuv[2][3]) * scale * 4.0f);
scale = 4096.0;
y_offset += 26;
u_offset += 26;
v_offset += 26;
for(y=0; y<info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 65535) y1 = 65535;
if(y2 < 0) y2 = 0;
if(y2 > 65535) y2 = 65535;
if(u < 0) u = 0;
if(u > 65535) u = 65535;
if(v < 0) v = 0;
if(v > 65535) v = 65535;
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
source_line += src_pitch;
}
}
else //RGBs
{
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
int i,noisearray[32];
unsigned short *rgb16 = (unsigned short *)line;
outyuv = line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 255);
}
if(info->format == DECODED_FORMAT_RGB32)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++<<1) - (128<<9)) + G1;
B1 = ((*BG++<<1) - (128<<9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else if(info->format == DECODED_FORMAT_RGB24)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++<<1) - (128<<9)) + G1;
B1 = ((*BG++<<1) - (128<<9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
else if(info->format == DECODED_FORMAT_RG48)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
G1 = (*G++);
R1 = (*RG++);
B1 = (*BG++);
*rgb16++ = R1;
*rgb16++ = G1;
*rgb16++ = B1;
}
}
line += pitch;
source_line += src_pitch;
}
}
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else // Output the frame in one of the RGB 8-bit formats
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
// Invert the bottom wavelet and convert the output to the requested color format
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sYUVtoRGB);
#else
TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
}
}
#if TIMING
// Count the number of progressive frames that were decoded
progressive_decode_count++;
#endif
}
STOP(tk_inverse);
#ifdef ADOBE_MEMORY_FUNCTIONS
if((decoder->RawBayer16 && decoder->RawBayerSize > 2048*1152*2) ||
(decoder->RGBFilterBuffer16 && decoder->RGBFilterBufferSize > 2048*1152*2))
{
#if _ALLOCATOR
if(decoder->RawBayer16)
{
FreeAligned(decoder->allocator, decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if(decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#else
if(decoder->RawBayer16)
{
MEMORY_ALIGNED_FREE(decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if(decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#endif
}
#endif
#if (0 && DEBUG)
if (logfile) {
//uint8_t *subimage = output;
uint8_t *subimage = output + (2 * info->width) - 16;
DumpArray8u("YUV Image", subimage, 16, 16, pitch, logfile);
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Exit ReconstructFrameToBuffer\n");
}
#endif
#if (0 && DEBUG && _WIN32)
_CrtCheckMemory();
#endif
}
#if 0
// Reconstruct the frame to quarter resolution at full frame rate
// NOTE(review): dead code -- this legacy variant is compiled out by the
// enclosing #if 0 and replaced by the version in the #else branch below.
// It references identifiers that are never declared in this scope
// ('output', 'channel_row_ptr'), so it would not compile if re-enabled;
// kept only for historical reference.
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
uint8_t *frame1, uint8_t *frame2, int output_pitch,
FRAME_INFO *info, char *buffer, size_t buffer_size)
{
TRANSFORM **transform_array = decoder->transform;
int output_width = info->width;
int output_height = info->height;
// Per-channel row pointers into the temporal lowpass/highpass bands
PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *out1_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *out2_row_ptr[CODEC_MAX_CHANNELS];
// Intermediate rows are carved out of the caller-supplied scratch buffer
PIXEL *bufptr = (PIXEL *)buffer;
uint8_t *output_row_ptr = output;
int low_pitch[CODEC_MAX_CHANNELS];
int high_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Check that there is enough space for the intermediate results from each channel
assert(output_width * sizeof(PIXEL) < buffer_size);
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
IMAGE *low_wavelet = transform_array[channel]->wavelet[3];
IMAGE *high_wavelet = transform_array[channel]->wavelet[2];
// Get the pointers to the first row in each lowpass band
low_row_ptr[channel] = low_wavelet->band[0];
high_row_ptr[channel] = high_wavelet->band[0];
// Pitch is converted from bytes to pixels for pointer arithmetic
low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);
// Allocate space for one row of results for this channel
channel_row_ptr[channel] = bufptr;
bufptr += low_wavelet->width;
}
for (row = 0; row < output_height; row++)
{
char *bufptr = buffer;
for (channel = 0; channel < num_channels; channel++)
{
// Invert the temporal transform at quarter resolution
InvertTemporalQuarterRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel]);
// Advance to the next row in each band for the temporal transform
low_row_ptr[channel] += low_pitch[channel];
high_row_ptr[channel] += high_pitch[channel];
}
// Pack the intermediate results into the output row
ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width);
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
#else
// Reconstruct the frame to quarter resolution at full frame rate.
//
// For each output row, inverts the temporal transform on the
// quarter-resolution wavelet lowpass bands of every channel into one row of
// unpacked 16-bit intermediate results, then converts that row into the
// requested output color format (either via the active metadata decoder
// path or one of the direct per-format conversion routines).
//
// decoder       - decoder state (transforms, scratch space, output options)
// num_channels  - number of color channels to reconstruct
// frame_index   - 0 selects the even temporal row, 1 selects the odd row
// output        - destination frame buffer
// output_pitch  - destination row pitch in bytes (negated internally for
//                 bottom-up RGB24/RGB32 output)
// info          - output frame dimensions and color format
// scratch       - scratch space providing one row of intermediate results
//                 per channel
// precision     - encoded pixel precision in bits
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
int frame_index, uint8_t *output, int output_pitch,
FRAME_INFO *info, const SCRATCH *scratch, int precision)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
TRANSFORM **transform_array = decoder->transform;
int output_width = info->width;
int output_height = info->height;
// Per-channel row pointers into the temporal lowpass/highpass bands
PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
int low_pitch[CODEC_MAX_CHANNELS];
int high_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Value used for filling the fourth channel in ARGB output
int alpha = 255;
int format = COLORFORMAT(info);
int color_space = COLORSPACE(info);
int decoded_format = DECODEDFORMAT(info);
//bool inverted = false;
// The pixels are descaled in the inverse temporal transform
//const int descale = 0;
// Shift the intermediate results to 16-bit pixels
const int shift_yu64 = 8;
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
#if DEBUG
size_t buffer_size = scratch->free_size;
#endif
// Initialize a pointer for allocating space in the buffer
PIXEL *bufptr = (PIXEL *)buffer;
// Array of pointers to the start of each channel in the intermediate results
PIXEL *channel_row_ptr[CODEC_MAX_CHANNELS];
// Check that there is enough space for the intermediate results from each channel
#if DEBUG
assert(output_width * sizeof(PIXEL) < buffer_size);
#endif
ComputeCube(decoder);
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
IMAGE *low_wavelet = transform_array[channel]->wavelet[4];
IMAGE *high_wavelet = transform_array[channel]->wavelet[3];
// Get the pointers to the first row in each lowpass band
low_row_ptr[channel] = low_wavelet->band[0];
high_row_ptr[channel] = high_wavelet->band[0];
// Pitch is converted from bytes to pixels for pointer arithmetic
low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);
// Force the row of intermediate results to be properly aligned
bufptr = (PIXEL *)ALIGN16(bufptr);
// Allocate space for one row of results for this channel
channel_row_ptr[channel] = bufptr;
bufptr += low_wavelet->width;
// Check that the row of intermediate results is properly aligned
// (required by the aligned SSE2 loads/stores below)
assert(ISALIGNED16(channel_row_ptr[channel]));
}
// Invert the image if required
switch (decoded_format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB32:
// Write rows bottom-up: start at the last row and negate the pitch
output_row_ptr += (output_height - 1) * output_pitch;
output_pitch = NEG(output_pitch);
}
//HACK: Seems to work, I don't know why. //DAN20070304
if (precision == 12) precision = 8;
// Apply the inverse temporal transform to the lowpass and highpass rows
for (row = 0; row < output_height; row++)
{
// Most of the color conversion routines use zero descaling
int descale = 0;
//char *bufptr = buffer;
for (channel = 0; channel < num_channels; channel++)
{
if (frame_index == 0)
{
// Invert the temporal transform at quarter resolution to get the even row
InvertTemporalQuarterEvenRow16s(low_row_ptr[channel], high_row_ptr[channel],
channel_row_ptr[channel], output_width, precision);
}
else
{
assert(frame_index == 1);
// Invert the temporal transform at quarter resolution to get the odd row
InvertTemporalQuarterOddRow16s(low_row_ptr[channel], high_row_ptr[channel],
channel_row_ptr[channel], output_width, precision);
}
// Advance to the next row in each band for the temporal transform
low_row_ptr[channel] += low_pitch[channel];
high_row_ptr[channel] += high_pitch[channel];
}
if(decoder->use_active_metadata_decoder)
{
uint8_t *channeldata[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
int channelpitch[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
int i;
FRAME_INFO info2;
memcpy(&info2, info, sizeof(FRAME_INFO));
// The active metadata decoder is fed one row at a time
info2.height = 1;
for(i=0;i<num_channels;i++)
{
channeldata[i] = (uint8_t *)channel_row_ptr[i];
channelpitch[i] = 0;
}
#if 1
{
// Clamp each signed 16-bit value to [0, 4095] with a saturating
// add/subtract pair, then shift left by 4 to scale to 16-bit range.
// NOTE(review): the U and V loops cover only width/2 samples --
// presumably chroma is at half horizontal resolution; confirm
// against the decoded channel layout.
__m128i *Y = (__m128i *)channeldata[0];
__m128i *U = (__m128i *)channeldata[1];
__m128i *V = (__m128i *)channeldata[2];
__m128i v;
int x;
__m128i rgb_limit_epi16 = _mm_set1_epi16(0x7fff - 0x0fff);
for(x=0;x<info->width;x+=8)
{
v = _mm_load_si128(Y);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(Y++, v);
}
for(x=0;x<info->width/2;x+=8)
{
v = _mm_load_si128(U);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(U++, v);
}
for(x=0;x<info->width/2;x+=8)
{
v = _mm_load_si128(V);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(V++, v);
}
}
#else
//non SSE2
// NOTE(review): this fallback path is compiled out and references
// variables ('x', 'gptr', 'src', 'scanline2') not declared in this
// scope; it would need rework before it could be enabled.
for(x=0;x<info->width*2;x++)
{
int val = *gptr++;
if(val < 0) val = 0;
if(val > 4095) val = 4095;
val <<= 4;
*src++ = val;
}
src = scanline2;
#endif
Row16uQuarter2OutputFormat(decoder, &info2, 0, output_row_ptr, output_pitch,
decoder->gop_frame_num/*0 frame*/, scratch->free_ptr, scratch->free_size, false, channeldata, channelpitch);
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
// Convert the rows of luma and chroma into the output format
switch(format)
{
case COLOR_FORMAT_YUYV:
case COLOR_FORMAT_UYVY:
// Pack the intermediate results into the output row
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
assert(0);//need quarter res BAYER To YUV decoder
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// assert(0);//need quarter res RGB To YUV decoder
// NOTE(review): channels are passed as [1],[0],[2] -- presumably
// the encoded channel order stores G first; confirm against the
// encoder's channel layout.
ConvertRGB2YUV( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, info->colorspace, format);
}
else
{
ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width, format);
}
break;
case COLOR_FORMAT_RGB24:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGB48toRGB24( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, 0);
}
else
{
// Convert the intermediate results into a row of RGB24
ConvertUnpacked16sRowToRGB24(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space);
}
break;
case COLOR_FORMAT_RGB32:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGBA48toRGB32(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], NULL,
output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, 0, 3/*only 3 chhanel not 4 for alpha*/);
}
else
{
// Convert the intermediate results into a row of RGBA32
ConvertUnpacked16sRowToRGB32(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space, alpha);
}
break;
case COLOR_FORMAT_YU64:
case COLOR_FORMAT_V210:
// Convert the intermediate results into a row of YU64
ConvertUnpacked16sRowToYU64(channel_row_ptr, num_channels, output_row_ptr, output_width,
shift_yu64, precision, format);
break;
case COLOR_FORMAT_B64A:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToB64A(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
}
else
{
ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, COLOR_FORMAT_B64A, color_space);
}
break;
case COLOR_FORMAT_R210:
case COLOR_FORMAT_DPX0:
case COLOR_FORMAT_RG30:
case COLOR_FORMAT_AR10:
case COLOR_FORMAT_AB10:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB30(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
}
else
{
ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
}
break;
case COLOR_FORMAT_RG48:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_RG64:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGBA64(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
default:
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile, "ReconstructQuarterFrame bad color format: %d\n", format);
}
#endif
assert(0);
break;
}
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
#endif
#if 0
// Copy the quarter resolution lowpass channels from the spatial transform
// NOTE(review): dead code -- compiled out by the enclosing #if 0 and
// superseded by ConvertQuarterFrameToBuffer below, which supports multiple
// output formats. Kept for historical reference only.
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision)
{
int output_width = info->width;
int output_height = info->height;
// Per-channel row pointers into the quarter resolution lowpass bands
PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
int input_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
IMAGE *wavelet = transform_array[channel]->wavelet[1];
// Get the pointers to the first row in each lowpass band
input_row_ptr[channel] = wavelet->band[0];
// Pitch is converted from bytes to pixels for pointer arithmetic
input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
}
for (row = 0; row < output_height; row++)
{
// Descale and pack the pixels in each output row
CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width, precision);
// Advance the input row pointers
for (channel = 0; channel < num_channels; channel++) {
input_row_ptr[channel] += input_pitch[channel];
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
#endif
// Convert the quarter resolution lowpass channels to the specified output format.
//
// Reads the lowpass band of wavelet index 1 for each channel (the quarter
// resolution reconstruction) and converts it row by row into the requested
// output color format, either through the threaded active-metadata decoder
// path or one of the direct per-format conversion routines.
//
// decoder          - decoder state (worker threads, color cube, output options)
// transform_array  - per-channel wavelet transforms to read from
// num_channels     - number of color channels
// output           - destination frame buffer
// output_pitch     - destination row pitch in bytes (negated internally for
//                    bottom-up RGB24/RGB32 output)
// info             - output frame dimensions and color format
// precision        - encoded pixel precision in bits
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision)
{
int output_width = info->width;
int output_height = info->height;
// Per-channel row pointers into the quarter resolution lowpass bands
PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
int input_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Value used for filling the fourth channel in ARGB output
int alpha = 255;
int format = COLORFORMAT(info);
int color_space = COLORSPACE(info);
int decoded_format = DECODEDFORMAT(info);
//bool inverted = false;
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the wavelets with quarter resolution
const int wavelet_index = 1;
IMAGE *wavelet = transform_array[channel]->wavelet[wavelet_index];
// The wavelet should have been reconstructed
assert(wavelet != NULL);
// The lowpass band should be valid
assert((wavelet->band_valid_flags & BAND_VALID_MASK(0)) != 0);
// Get the pointers to the first row in each lowpass band
input_row_ptr[channel] = wavelet->band[0];
// Pitch is converted from bytes to pixels for pointer arithmetic
input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
}
// Invert the image if required
switch (decoded_format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB32:
// Write rows bottom-up: start at the last row and negate the pitch
output_row_ptr += (output_height - 1) * output_pitch;
output_pitch = NEG(output_pitch);
}
ComputeCube(decoder);
//HACK DAN20110122 -- some formats will not directly decode so need to use the AM route
{
if( format == COLOR_FORMAT_YU64 ||
format == COLOR_FORMAT_V210 ||
format == COLOR_FORMAT_R408 ||
format == COLOR_FORMAT_V408)
{
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// Route RGB-encoded sources to these YUV output formats through
// the active metadata decoder instead of a direct conversion
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
}
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output_row_ptr;
mailbox->pitch = output_pitch;
mailbox->framenum = 0;
for(channel = 0; channel < num_channels; channel++)
{
mailbox->channeldata[channel] = (uint8_t *)input_row_ptr[channel];
// Channel pitch is passed to the workers in bytes
mailbox->channelpitch[channel] = input_pitch[channel]*sizeof(PIXEL);
}
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 1;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
}
#endif
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
// Convert each row to the specified output format
for (row = 0; row < output_height; row++)
{
// Right shift for converting lowpass coefficients to pixels
int descale = 4;
switch(format & 0x7fffffff)
{
case COLOR_FORMAT_YUYV:
case COLOR_FORMAT_UYVY:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// assert(0);//need quarter res RGB To YUV decoder
// NOTE(review): channels are passed as [1],[0],[2] -- presumably
// the encoded channel order stores G first; confirm against the
// encoder's channel layout.
ConvertRGB2YUV( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, info->colorspace, format);
}
else
{
// Descale and pack the pixels in each output row
CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width,
precision, format);
}
break;
case COLOR_FORMAT_RGB24:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGB48toRGB24(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, 0);
}
else
{
// Convert the intermediate results into a row of RGB24
ConvertUnpacked16sRowToRGB24(input_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space);
}
break;
case COLOR_FORMAT_RGB32:
case COLOR_FORMAT_RGB32_INVERTED:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGBA48toRGB32( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], input_row_ptr[3],
output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, 0, num_channels);
}
else
{
// Convert the intermediate results into a row of RGBA32
ConvertUnpacked16sRowToRGB32(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space, alpha);
}
break;
case COLOR_FORMAT_YU64:
case COLOR_FORMAT_V210:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
//TODO RGB to YUV Quarter RES DAN20110120 - handle above with HACK DAN20110122
//
}
else
{
// Convert the intermediate results into a row of YU64
ConvertUnpacked16sRowToYU64(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format);
}
break;
case COLOR_FORMAT_B64A:
// Convert the intermediate results to a row of ARGB with 16 bits per pixel
descale = 2;
ConvertUnpacked16sRowToB64A(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_R210:
case COLOR_FORMAT_DPX0:
case COLOR_FORMAT_RG30:
case COLOR_FORMAT_AR10:
case COLOR_FORMAT_AB10:
// Convert the intermediate results to a row of ARGB with 16 bits per pixel
descale = 2;
ConvertUnpacked16sRowToRGB30(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
break;
case COLOR_FORMAT_RG48:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB48(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_RG64:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGBA64(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
default:
// Unsupported output format for the quarter resolution path
assert(0);
break;
}
// Advance the input row pointers
for (channel = 0; channel < num_channels; channel++) {
input_row_ptr[channel] += input_pitch[channel];
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
}
// Release all resources allocated by the decoder.
// NOTE(review): the transform/num_transforms parameters are not used in this
// body; they appear to be kept for interface compatibility -- confirm callers.
void DecodeRelease(DECODER *decoder, TRANSFORM *transform[], int num_transforms)
{
#if _TIMING && 0
    FILE *logfile = decoder->logfile;
    uint32_t frame_count = decoder->frame_count;

    // Dump the accumulated timing statistics before tearing the decoder down
    // (fixed: the original had a stray '\' line continuation after the
    // condition, splicing the '{' onto the 'if' line)
    if (logfile != NULL && frame_count > 0)
    {
#ifdef _WIN32
        PrintStatistics(logfile, frame_count, NULL, TIMING_CSV_FILENAME);
#else
        PrintStatistics(logfile, frame_count, NULL, NULL);
#endif
    }
#endif

    // Free the data structures allocated for decoding
    ClearDecoder(decoder);
}
// Force the decoder to reprocess its metadata on the next frame.
// The flag is set on this decoder and, if present, on its parallel companion.
void DecodeForceMetadataRefresh(DECODER *decoder)
{
    decoder->cfhddata.force_metadata_refresh = true;

    if (decoder->parallelDecoder)
    {
        decoder->parallelDecoder->cfhddata.force_metadata_refresh = true;
    }
}
// Store the decoder control flags.
void SetDecoderFlags(DECODER *decoder, uint32_t flags)
{
    // The logfile local is only needed by the (disabled) debug output below;
    // guard the declaration with the same condition as its use so debug
    // builds do not get an unused-variable warning
#if (0 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif

    // Set the decoder flags
    decoder->flags = flags;

#if (0 && DEBUG)
    if (logfile) {
        // Fixed: flags is a 32-bit bit mask, so use an unsigned hex format;
        // the original passed a uint32_t to %p, which is undefined behavior
        fprintf(logfile, "Decoder flags: 0x%08x\n", decoder->flags);
    }
#endif
}
// Set the decoded frame dimensions, output pixel format, and resolution.
void SetDecoderFormat(DECODER *decoder, int width, int height, int format, int resolution)
{
    decoder->frame.width = width;
    decoder->frame.height = height;

    // The decoded format and the delivered output format are the same here
    decoder->frame.output_format = format;
    decoder->frame.format = format;

    // The 13-bit formats (WP13 and W13A) use a lower white point;
    // all other formats use the full 16-bit white point
    if (format == DECODED_FORMAT_WP13 || format == DECODED_FORMAT_W13A)
    {
        decoder->frame.white_point = 13;
    }
    else
    {
        decoder->frame.white_point = 16;
    }

    decoder->frame.resolution = resolution;
    decoder->frame.pixel_size = PixelSize(decoder->frame.format);
}
// Determine the CPU features and processor count available to the decoder
// and pack them into the capabilities word (count in the upper 16 bits).
void SetDecoderCapabilities(DECODER *decoder)
{
    // Default ceiling on usable CPUs (AJA spins off too many otherwise)
    int limit_cpus = 32;
    int processor_count;

    // Set the capabilities that are most likely supported by the Intel Mac
    decoder->thread_cntrl.capabilities = (_CPU_FEATURE_MMX | _CPU_FEATURE_SSE | _CPU_FEATURE_SSE2);

    if (decoder->thread_cntrl.limit)
    {
        // An explicit thread limit overrides the default ceiling
        limit_cpus = decoder->thread_cntrl.limit;
    }
    else if (decoder->thread_cntrl.affinity)
    {
        // Count the processors enabled in the 32-bit affinity mask
        int bit;
        limit_cpus = 0;
        for (bit = 0; bit < 32; bit++)
        {
            if (decoder->thread_cntrl.affinity & (1u << bit))
            {
                limit_cpus++;
            }
        }
    }

    // Clamp the detected processor count to the computed ceiling
    processor_count = GetProcessorCount();
    if (processor_count > limit_cpus)
        processor_count = limit_cpus;

#if (0 && DEBUG)
    // Set the number of processors (for debugging)
    processor_count = 1;
    fprintf(stderr, "Limit processors to %d\n", processor_count);
#endif

    // Pack the processor count into the upper 16 bits of the capabilities
    decoder->thread_cntrl.capabilities |= (processor_count << 16);
}
// Return the decoder capability word: CPU feature bits in the low half and
// the usable processor count in the upper 16 bits (see SetDecoderCapabilities).
int GetDecoderCapabilities(DECODER *decoder)
{
    return decoder->thread_cntrl.capabilities;
}
// Set the decoded colorspace flags if they are within the supported range.
// Returns true if the flags were applied, false if they were rejected.
// (The lower-bound check was intentionally disabled in the original.)
bool SetDecoderColorFlags(DECODER *decoder, uint32_t color_flags)
{
    // Reject flags above the supported range
    if (color_flags > MAX_DECODED_COLOR_SPACE)
    {
        return false;
    }

    decoder->frame.colorspace = color_flags;
    return true;
}
// Compute the resolution corresponding to the specified combination of input and output dimensions
// Map a combination of input and output dimensions to a decoding resolution:
// full, half, or quarter.  Returns DECODED_RESOLUTION_UNSUPPORTED when the
// output size matches none of those scales exactly.
int DecodedResolution(int input_width, int input_height, int output_width, int output_height)
{
    // Output height can be negative for inverted RGB
    output_height = abs(output_height);

    if (output_width == input_width && output_height == input_height)
    {
        return DECODED_RESOLUTION_FULL;
    }

    // For non-negative dimensions, successive halving matches division by 2 and 4
    if (output_width == input_width / 2 && output_height == input_height / 2)
    {
        return DECODED_RESOLUTION_HALF;
    }

    if (output_width == input_width / 4 && output_height == input_height / 4)
    {
        return DECODED_RESOLUTION_QUARTER;
    }

    return DECODED_RESOLUTION_UNSUPPORTED;
}
// Compute the decoded resolution that is closest to the output dimensions
// Compute the decoded resolution (full, half, or quarter) that is closest to
// the output dimensions without decoding to a frame smaller than the output.
// The dead alternative strategies that were preserved under "#if 0"/"#else"
// in the original have been removed; the live algorithm is unchanged.
int DecodedScale(int input_width, int input_height, int output_width, int output_height)
{
    // Resolution codes indexed by the number of halvings applied
    static const int decodedResolution[] =
    {
        DECODED_RESOLUTION_FULL,
        DECODED_RESOLUTION_HALF,
        DECODED_RESOLUTION_QUARTER
    };

    const int max_reduction = 2;
    int decoded_width = input_width;
    int decoded_height = input_height;
    int reduction = 0;

    // Output height can be negative for inverted RGB
    output_height = abs(output_height);

    // Always decode to the next larger size
    while (decoded_width > output_width &&
           decoded_height > output_height &&
           reduction < max_reduction)
    {
        // Candidate frame size after one more halving
        int reduced_width = decoded_width / 2;
        int reduced_height = decoded_height / 2;

        // Stop if halving would make the decoded frame smaller than the output
        if (reduced_width < output_width || reduced_height < output_height)
        {
            break;
        }

        decoded_width = reduced_width;
        decoded_height = reduced_height;
        reduction++;
    }

    // Check that the decoded resolution is valid
    assert(0 <= reduction && reduction <= max_reduction);

    return decodedResolution[reduction];
}
// Compute the decoded frame dimensions for the given encoded dimensions
// and decoding resolution.  An unrecognized resolution asserts in debug
// builds and falls back to full resolution (matching the original's
// default-case fallthrough).
void ComputeDecodedDimensions(int encoded_width, int encoded_height, int decoded_resolution,
                              int *decoded_width_out, int *decoded_height_out)
{
    int divisor;

    switch (decoded_resolution)
    {
    case DECODED_RESOLUTION_HALF:
        divisor = 2;
        break;

    case DECODED_RESOLUTION_QUARTER:
        divisor = 4;
        break;

    case DECODED_RESOLUTION_LOWPASS_ONLY:
        //TODO: Check that the lowpass dimensions are correct
        divisor = 8;
        break;

    default:
        assert(0);
        // fall through: treat unknown resolutions as full resolution
    case DECODED_RESOLUTION_FULL:
        divisor = 1;
        break;
    }

    *decoded_width_out = encoded_width / divisor;
    *decoded_height_out = encoded_height / divisor;
}
// Return true if the specified resolution is supported
bool IsDecodedResolution(int resolution)
{
if (resolution == DECODED_RESOLUTION_QUARTER) {
return true;
}
return (resolution == DECODED_RESOLUTION_FULL ||
resolution == DECODED_RESOLUTION_HALF);
}
// Return true if the encoded sample is a key frame.  The routine scans at
// most the first twenty tag-value pairs for the sample-type tag; group
// headers, first frames, and intra frames count as key frames.  A sample
// with no sample-type tag in that window is reported as a non-key frame.
bool IsSampleKeyFrame(uint8_t *sample, size_t size)
{
    // Search the first twenty tags for the sample type
    const int num_tags = 20;
    BITSTREAM bitstream;
    int i;

    InitBitstreamBuffer(&bitstream, sample, size, BITSTREAM_ACCESS_READ);

    for (i = 0; i < num_tags && size > 0; i++, size -= sizeof(TAGVALUE))
    {
        TAGVALUE segment = GetSegment(&bitstream);

        if (segment.tuple.tag != CODEC_TAG_SAMPLE)
        {
            continue;
        }

        // Found the sample type: classify it and stop scanning
        switch (segment.tuple.value)
        {
        case SAMPLE_TYPE_GROUP:
        case SAMPLE_TYPE_FIRST:
        case SAMPLE_TYPE_IFRAME:
            return true;

        case SAMPLE_TYPE_GROUP_TRAILER:
        case SAMPLE_TYPE_NONE:
        case SAMPLE_TYPE_ERROR:
        case SAMPLE_TYPE_CHANNEL:
            assert(0);      // Unexpected situation
            return false;   // Report the sample as a non-key frame

        case SAMPLE_TYPE_SEQUENCE_HEADER:
        case SAMPLE_TYPE_FRAME:
        case SAMPLE_TYPE_SECOND:
        case SAMPLE_TYPE_PFRAME:
        default:
            // Difference frames and headers are not key frames
            return false;
        }
    }

    return false;
}
// Return the number of the most recently decoded frame, or zero if the
// decoder pointer is NULL.
uint32_t DecodedFrameNumber(DECODER *decoder)
{
    // Fixed: the original computed &decoder->codec before testing the
    // pointer, dereferencing NULL before the guard could take effect
    if (decoder == NULL) return 0;

    return decoder->codec.frame_number;
}
/***** Start of the new code for the finite state machine (FSM) decoder *****/
/*
 * ZeroHighPassRow -- processor-specific routines that zero one row of
 * highpass coefficients.  The variant is selected at compile time by the
 * _PROCESSOR_* macros (and, when _PROCESSOR_DISPATCH is defined, at run
 * time through Intel's cpu_dispatch mechanism).
 */
#if _PROCESSOR_DISPATCH
__declspec(cpu_dispatch(Pentium_4,Generic))
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
    // Stub routine for processor specific dispatch
}
#endif

#if _PROCESSOR_GENERIC
#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Generic))
#endif
// Generic (MMX) version.  This version assumes that the row is a multiple
// of 8 bytes; length is in bytes.
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
    int count;
    // Check that the row starts on a 16-byte boundary
    //assert(ISALIGNED(rowptr, 16));
    // Check that the row length (in bytes) is a multiple of 8 byte blocks
    assert(ISALIGNED(length, 8));
    // Convert the length from bytes to 8-byte blocks
    count = (length >> 3);
    // This code assumes that at least one 8-byte block will be zeroed
    assert(count > 0);
    // NOTE(review): _mm_empty()/emms is commented out below, so the MMX
    // state is not cleared after this loop -- confirm callers do not mix
    // in x87 floating point before the next emms
    __asm
    {
        pxor mm0, mm0             // Zero an 8-byte MMX register
        mov eax, rowptr           // Load the pointer to the memory block
        mov ebx, count            // Load the count of 8-byte blocks
loop:   movq [eax], mm0           // Write 8 bytes of zeros
        add eax, 8                // Advance to the next 8 byte block
        sub ebx, 1                // Decrement the number of blocks
        jg loop
    }
    //_mm_empty();
}
#endif

#if _PROCESSOR_PENTIUM_4
#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Pentium_4))
#endif
#ifndef _WIN64
// Pentium 4 (SSE2) version.  This version assumes that the row is a
// multiple of 16 bytes; length is in bytes.
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
    int count;
    // Check that the row starts on a 16-byte boundary
    assert(ISALIGNED(rowptr, 16));
    // Check that the row length (in bytes) is a multiple of 16 byte blocks
    assert(ISALIGNED(length, 16));
    // Convert the length from bytes to 16-byte blocks
    count = (length >> 4);
    // This code assumes that at least one 16-byte block will be zeroed
    assert(count > 0);
#if 1 //DANREMOVE -- the SSE2 assembly below is disabled in favor of memset
    memset(rowptr, 0, length);
#else
    __asm
    {
        pxor xmm0, xmm0           // Zero a 16 byte register
        mov eax, rowptr           // Load the pointer to the memory block
        mov ebx, count            // Load the count of 16-byte blocks
loop:   movdqa [eax], xmm0        // Write 16 bytes of zeros
        add eax, 16               // Advance to the next 16 byte block
        sub ebx, 1                // Decrement the number of blocks
        jg loop
    }
#endif
}
#else
// 64-bit Windows version: inline assembly is unavailable, so use memset.
// This version assumes that the row is a multiple of 16 bytes.
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
    // Check that the row starts on a 16-byte boundary
    assert(ISALIGNED(rowptr, 16));
    // Check that the row length (in bytes) is a multiple of 16 byte blocks
    assert(ISALIGNED(length, 16));
    memset(rowptr, 0, length);
}
#endif
#endif
/*
 * Accessors for stepping the finite state machine decoder: fetch a table
 * entry for the current state, reset to the root state, and advance to the
 * next state.  The (disabled) debug build uses real functions so the steps
 * can be traced; release builds use macros for speed.
 */
#if (0 && _DEBUG)
// Functions for the finite state machine decoder (debug version)
static FSMENTRY *GetFSMTableEntry(FSM *fsm, int index)
{
    // Return the address of the next table entry in the finite state machine
    return &fsm->next_state[index];
}

static void ResetFSM(FSM *fsm)
{
    // Reset the state to the beginning of the finite state machine entries
    fsm->next_state = fsm->entries;
}

static void UpdateFSM(FSM *fsm, int next)
{
    // Change the state pointer to the next block of table entries
    fsm->next_state = fsm->entries + (next << FSM_INDEX_SIZE);
}
#else
// Macros for the finite state machine decoder
#if _INDIVIDUAL_LUT
// Variant that keeps a per-state table of individually allocated entries
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries[0]
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries[next]
#define GetFSMTableEntryIndividual(fsm, index) (FSMENTRY *)fsm->table.entries_ind[(fsm->next_state_index << FSM_INDEX_SIZE) | index]
#define ResetFSMIndividual(fsm) fsm->next_state_index = 0
#define UpdateFSMIndividual(fsm, next) fsm->next_state_index = next
#else
// Variant that indexes one contiguous table of FSM_INDEX_SIZE-wide groups
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries+((int)next << FSM_INDEX_SIZE)
#endif
#endif
#if _DEBUG
/*
 * Debug helpers that unpack a finite state machine table entry into its
 * component fields.  The DebugOutput* variants only compute the fields into
 * locals (presumably for inspection under a debugger -- they emit no
 * output); the Print* variants write one CSV line to the given logfile.
 */
static void DebugOutputFSMEntry(FSM *fsm, int index, FSMENTRY *entry)
{
    // Skip counts are packed into one word: low 12 bits pre, remaining bits post
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);
    // Remove companding (values are stored scaled by 32)
    int value0 = entry->value0 / 32;
    int value1 = entry->value1 / 32;
    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);
}

static void DebugOutputFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);
    // Remove companding (the fast entry packs both values in one word)
    int value0 = (entry->values >> 16) / 32;
    int value1 = (entry->values & 0xFFFF) / 32;
    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);
}

static void DebugOutputFSM(FSM *fsm)
{
    // Walk the first group of table entries, unpacking the skip fields
    int num_entries = FSM_INDEX_ENTRIES;
    int i;
    for (i = 0; i < num_entries; i++)
    {
        FSMENTRY *entry = &fsm->table.entries[0][i];
        int pre_skip = (entry->pre_post_skip & 0xFFF);
        int post_skip = (entry->pre_post_skip >> 12);
    }
}

// Write one FSM table entry to the logfile as "index, v0, v1, pre, post"
static void PrintFSMEntry(FSM *fsm, int index, FSMENTRY *entry, FILE *logfile)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);
    // Remove companding
    int value0 = entry->value0 / 32;
    int value1 = entry->value1 / 32;
    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);
    if (logfile) {
        fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
    }
}

// Same as PrintFSMEntry but for the packed FSMENTRYFAST layout
static void PrintFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry, FILE *logfile)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);
    // Remove companding
    int value0 = (entry->values >> 16) / 32;
    int value1 = (entry->values & 0xFFFF) / 32;
    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);
    if (logfile) {
        fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
    }
}
#endif
// Read the next byte from the bitstream and advance the stream position
// (inline of the third case of GetByte).
static inline int GetFastByte(BITSTREAM *stream)
{
    // Fetch the byte at the current position and step the stream pointer
    int byte = (uint32_t)(*(stream->lpCurrentWord++));

#if ERROR_TOLERANT
    // Update the count of bytes used
    stream->nWordsUsed--;
#endif

    // The result must fit in eight bits
    assert((byte & ~BITMASK(8)) == 0);

    return byte;
}
#if 0
// Read the next 16-bit big-endian value from the bitstream.
// Disabled: not referenced anywhere in the decoder.
static inline int GetFastShort(BITSTREAM *stream)
{
    // Adaptation of the code in GetByte
    uint8_t *lpCurrentWord = stream->lpCurrentWord;
    // Get the next byte from the bitstream
    int byte = (uint32_t )(lpCurrentWord[0]);
    // Combine with the following byte, most significant byte first
    int word = (byte << 8) | (uint32_t )(lpCurrentWord[1]);
    // Update the state of the bitstream
    stream->lpCurrentWord = lpCurrentWord+2;
    // Check that the high bits are zero
    assert((word & ~BITMASK(16)) == 0);
    return word;
}
#endif
// Must declare the byte swap function even though it is an intrinsic
//int _bswap(int);
#if 0
// Read the next 32-bit value from the bitstream, converting from big-endian
// byte order to native order.  Disabled: not referenced in the decoder.
static inline int GetFastLong(BITSTREAM *stream)
{
    uint32_t *lpCurrentWord = (uint32_t *)stream->lpCurrentWord;
    int word = *(lpCurrentWord)++;
    //word = _bswap(word);
    word = SwapInt32BtoN(word);
    stream->lpCurrentWord = (uint8_t *)lpCurrentWord;
    return word;
}
#endif
#if 0 //DAN20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
//
// The byte is split into two FSM_INDEX_SIZE-bit chunks and each chunk drives
// one step of the finite state machine.  Each step either skips a run of
// zeros, emits one dequantized magnitude, or emits two; a BAND_END_TRAILER
// value terminates the band and the remaining rows are zeroed.
// Disabled dead code: superseded and never compiled.
bool DecodeBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL *rowptr = image;           // Current output row
    int column = 0;                  // Current column within the row
    int32_t value;
    size_t bytes_row_size = width * sizeof(PIXEL);
    PIXEL *maxptr;                   // Address of the row after the last row
    int length = width * sizeof(PIXEL);
    //ROI roi = {width, 1};

    // This version of Huffman decoder assumes that one byte
    // is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // Convert the pitch to units of pixels
    pitch /= sizeof(PIXEL);

    // Compute the address of the row after the last row in the band
    maxptr = rowptr + height * pitch;

    // Round up the row length (in bytes) to a multiple of 16 bytes
    length = ALIGN16(length);

#if (0 && DEBUG)
    zerorow_count = 0;
#endif

    // Rows are zeroed up front and values written into the zeroed row
    ZeroHighPassRow(rowptr, length);

    // Decode runs and magnitude values until the band end trailer is decoded
    for (;;)
    {
        // Read a byte from the bitstream
        byte = GetFastByte(stream);

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return when the entire band is decoded
        if (entry->value0 == BAND_END_TRAILER) {
            // Zero out the whole subband from here on
            rowptr += pitch;
            while(rowptr < maxptr) {
                ZeroHighPassRow(rowptr, length);
                rowptr += pitch;
            }
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // If no magnitude value is decoded
        if (entry->value0 == 0) {
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros
            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }
        // If there is only one decoded magnitude value
        else if(entry->value1 == 0) {
            // Undo quantization and scaling
            value = quantization * entry->value0;
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros
            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }

            // Fill in the decoded magnitude
            // Check the column before storing the value
            //assert(index < width);
            assert(0 <= column && column < width);

            // Store the saturated value at the position found in the scan
            rowptr[column] = SATURATE(value);
            column += entry->post_skip;

            // Did the scan go beyond the end of the row?
            if (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }
        // If there are two decoded magnitude values
        else
        {
            // Check the column before storing values
            assert(0 <= column && column < width);

            if(column < width-1) {
                value = quantization * entry->value0;
                rowptr[column++] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr[column++] = SATURATE(value);
            }
            else {
                // The pair straddles a row boundary: first value ends this
                // row, second value starts the next row
                value = quantization * entry->value0;
                rowptr[column] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
                column = 0;
                rowptr[column++] = SATURATE(value);
            }
        }

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER) {
            // Zero out the whole subband from here on
            rowptr += pitch;
            while(rowptr < maxptr) {
                ZeroHighPassRow(rowptr, length);
                rowptr += pitch;
            }
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // If no magnitude value is decoded
        if (entry->value0 == 0) {
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros
            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }
        // If there is only one decoded magnitude value
        else if (entry->value1 == 0) {
            // Undo quantization and scaling
            int32_t value = quantization * entry->value0;
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros
            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }

            // Fill in the decoded magnitude
            // Check the column before storing the value
            //assert(index < width);
            assert(0 <= column && column < width);

            // Store the saturated value at the position found in the scan
            rowptr[column] = SATURATE(value);
            column += entry->post_skip;

            // Did the scan go beyond the end of the row?
            if (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }
        // If there are two decoded magnitude values
        else
        {
            // Check the column before storing values
            assert(0 <= column && column < width);

            if(column < width-1) {
                value = quantization * entry->value0;
                rowptr[column++] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr[column++] = SATURATE(value);
            }
            else {
                value = quantization * entry->value0;
                rowptr[column] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
                column = 0;
                rowptr[column++] = SATURATE(value);
            }
        }
    }
}
#endif
// Decode a subband of highpass coefficients using a finite state machine.
// One byte is read from the bitstream each time and decoded in two steps.
// New version that uses a buffer aligned to the cache for decoding.
#if 0
// Zero a block of whole cache lines (used by the buffered band decoder).
// Disabled along with DecodeBandFSMBuffered.
static inline void ZeroHighPassBuffer(PIXEL *ptrCacheLines, int numCacheLines)
{
    // This routine assume that the cache line size is 64 bytes
    assert(_CACHE_LINE_SIZE == 64);

    // This routine assumes that the input pointer is aligned to a cache line
    assert(ISALIGNED(ptrCacheLines, _CACHE_LINE_SIZE));

    // This routine assumes that at least one cache line will be written
    assert(numCacheLines > 0);

#if __GNUC__
    memset(ptrCacheLines, 0, numCacheLines * _CACHE_LINE_SIZE);
#else
    __asm
    {
        pxor xmm0, xmm0           // Zero a 16 byte register
        mov eax, ptrCacheLines    // Load the pointer to the memory block
        mov ebx, numCacheLines    // Load the count of the number of cache lines
loop:   movdqa [eax], xmm0        // Write 64 bytes of zeros using aligned stores
        movdqa [eax+16], xmm0
        movdqa [eax+32], xmm0
        movdqa [eax+48], xmm0
        add eax, 64               // Advance to the next cache line
        sub ebx, 1                // Decrement the number of cache lines
        jg loop
    }
#endif
    // The routine returns the pointer to the cache line after zeroing the block
}
#endif
#if 0
// Copy a decoded row from the cache-aligned working buffer out to the image
// using non-temporal stores.  Disabled along with DecodeBandFSMBuffered.
static inline void CopyRowBuffer(char *rowptr, PIXEL *buffer, int length)
{
    // Note that the length is in units of bytes (not pixels)
    int count;      // Number of 16-byte blocks to copy

    // Check that the row length is an integer multiple of 16-byte blocks
    assert(ISALIGNED(length, 16));

    // Convert the row length to the number of 16-byte blocks to copy
    count = length >> 4;

    // This routine assumes that at least one 16 byte block will be copied
    assert(count > 0);

#if __GNUC__
    // Use standard memory copy
    memcpy(rowptr, buffer, length);
#else
    // Copy a multiple of 16 byte blocks
    __asm
    {
        mov eax, rowptr           // Load the pointer to the destination
        mov ebx, buffer           // Load the pointer to the source
        mov ecx, count            // Load the number of 16-byte blocks to copy
loop:   movdqa xmm0, [ebx]        // Load 16 bytes from the source
        movntdq [eax], xmm0       // Copy 16 bytes to the destination
        add eax, 16               // Advance to the group of 16 bytes
        add ebx, 16
        sub ecx, 1                // Decrement the number of blocks to copy
        jg loop
    }
#endif
}
#endif
// DecodeBandFSMBuffered is no longer used
#if 0 //dan20041030 not used
bool DecodeBandFSMBuffered(FSM *fsm, BITSTREAM *stream, PIXEL *image,
int width, int height, int pitch,
int quantization, char *decoding_buffer, size_t decoding_buffer_size)
{
char *rowptr = (char *)image; // Pointer to current row
char *maxptr = rowptr + height * pitch; // Address of row after the last row
FSMENTRY *entry;
int index;
int byte;
int column = 0;
int32_t value;
size_t row_size;
size_t cache_row_size; // Size of a row in bytes
int cache_line_count; // Size of the buffer in cache lines
PIXEL *buffer; // Pixel pointer to the buffer
int length; // Length of row in bytes
// Check that the processing size allows two chunks per byte
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
// The bitstream buffer should be empty
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Compute the number of cache lines used in the buffer
row_size = width * sizeof(PIXEL);
cache_row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
cache_line_count = (cache_row_size >> _CACHE_LINE_SHIFT);
// Check that the buffer is large enough
assert(decoding_buffer != NULL && decoding_buffer_size >= cache_row_size);
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(decoding_buffer, _CACHE_LINE_SIZE));
// This routine assumes that the rows are contiguous and the pitch is a multiple of 16 bytes
length = pitch;
assert(length == ALIGN(row_size, 16));
// Cast the buffer pointer for pixel access
buffer = (PIXEL *)decoding_buffer;
// Zero the decoding buffer
ZeroHighPassBuffer(buffer, cache_line_count);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER)
{
// Copy the buffer to the row if not already beyond the band
if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);
// Advance to the next row
rowptr += pitch;
// Zero the remaining rows in the subband
while (rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
// Reset the finite state machine to the root node in the Huffman tree
ResetFSM(fsm);
// Return indication that the band was fully decoded
return true;
}
// Set the finite state machine to the next state in the Huffman tree
UpdateFSM(fsm, entry->next_state);
// No magnitude values decoded?
if (entry->value0 == 0)
{
// No magnitudes decoded so just advance the column pointer
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
// Only one magnitude value decoded?
else if (entry->value1 == 0)
{
// Process the magnitude value that was decoded
// Undo quantization and scaling
value = quantization * entry->value0;
// Advance to the column where the value should be placed
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
buffer[column] = SATURATE(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
else // Two magnitude values were decoded
{
// Check the column before storing values
assert(0 <= column && column < width);
if (column < width - 1) {
// Dequantize and store the first value
value = quantization * entry->value0;
buffer[column++] = SATURATE(value);
// Dequantize and store the second value
value = quantization * entry->value1;
buffer[column++] = SATURATE(value);
}
else {
// Dequantize and store the first value in the current row
value = quantization * entry->value0;
buffer[column] = SATURATE(value);
// Dequantize the second value
value = quantization * entry->value1;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
// Reset the column to the beginning of the row
column = 0;
// Store the second value in the new row
buffer[column++] = SATURATE(value);
}
}
// Decode the second 4-bit chunk
index = byte & FSM_INDEX_MASK;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Copy the buffer to the row if not already beyond the band
if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);
// Advance to the next row
rowptr += pitch;
// Zero the remaining rows in the subband
while (rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
// Reset the finite state machine to the root node in the Huffman tree
ResetFSM(fsm);
// Return indication that the band was fully decoded
return true;
}
// Set the finite state machine to the next state in the Huffman tree
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0) {
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0) {
// Undo quantization and scaling
int32_t value = quantization * entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
buffer[column] = SATURATE(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if (column < width-1) {
value = quantization * entry->value0;
buffer[column++] = SATURATE(value);
value = quantization * entry->value1;
buffer[column++] = SATURATE(value);
}
else {
value = quantization * entry->value0;
buffer[column] = SATURATE(value);
value = quantization * entry->value1;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
// Reset the column to the beginning of the row
column = 0;
buffer[column++] = SATURATE(value);
}
}
}
}
#endif
#if 0 //dan20041030 not used
// Decode a subband using FSM, combine the two results decoded from one byte
//
// Each byte read from the bitstream is split into two 4-bit chunks and BOTH
// chunks are decoded through the finite state machine before output, so this
// routine enumerates every combination of (zero, one, two) magnitudes from
// the first and second chunk. Decoded magnitudes are multiplied by the
// quantization factor, saturated, and written directly into the image rows.
// Zero runs advance 'column'; whenever column passes 'width' the decoder
// wraps to the next row and zeroes it on entry.
//
// Returns true when a BAND_END_TRAILER value is decoded from either chunk.
//
// NOTE(review): this function is compiled out (#if 0, "not used") and kept
// for reference only.
// NOTE(review): 'pitch' is added directly to a PIXEL pointer, so it appears
// to be in units of pixels rather than bytes — confirm against callers.
// NOTE(review): unlike DecodeBandFSM8s below, the band-end path does NOT
// zero the remaining rows of the subband — presumably acceptable for this
// variant, but verify before reviving this code.
// NOTE(review): locals 'skip', 'row', and 'bytes_row_size' are never read.
bool DecodeBandFSMCombined(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization)
{
int index, skip;
uint8_t byte;
FSMENTRY *entry1, *entry2;
PIXEL *rowptr = image;
int row = 0, column = 0;
int32_t value,bytes_row_size = width*sizeof(PIXEL);
PIXEL *maxptr = rowptr + height*pitch;
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
// Zero the first output row (later rows are zeroed as the scan reaches them)
ZeroHighPassRow(rowptr, width);
// Double check that the bitstream buffer is empty
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
//byte = GetBits(stream, BITSTREAM_WORD_SIZE);
#if 0
byte = GetByte(stream);
if (stream->error != BITSTREAM_ERROR_OKAY) {
stream->error = VLC_ERROR_NOTFOUND;
return false;
}
#else
// Inline of the third case of GetByte (avoids the call overhead;
// assumes the stream has enough data — no bounds check here)
uint8_t *lpCurrentWord = stream->lpCurrentWord;
// Get the next byte from the bitstream
byte = (uint32_t )(*(lpCurrentWord++));
// Update the state of the bitstream
stream->lpCurrentWord = lpCurrentWord;
// Check that the high bits are zero
assert((byte & ~BITMASK(8)) == 0);
#endif
// Decode the first 4-bit chunk (high nibble)
index = byte >> FSM_INDEX_SIZE;
entry1 = GetFSMTableEntry(fsm, index);
UpdateFSM(fsm, entry1->next_state);
// Decode the second 4-bit chunk (low nibble)
index = byte & ((1<<FSM_INDEX_SIZE)-1);
entry2 = GetFSMTableEntry(fsm, index);
UpdateFSM(fsm, entry2->next_state);
// Return when the subband is completely decoded
// (remaining rows are not zeroed here — see review note above)
if(entry1->value0 == BAND_END_TRAILER || entry2->value0 == BAND_END_TRAILER) {
ResetFSM(fsm);
return true;
}
// If no magnitude value is decoded at the first step
if (entry1->value0 == 0) {
// If no magnitude is decoded at the second step
if(entry2->value0 == 0) {
// Both chunks contribute only zero runs; combine the skips
column += entry1->pre_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If one magnitude is decoded at the second step
else if(entry2->value1 == 0) {
// Skip to the non-zero position
column += entry1->pre_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Undo quantization and scaling
value = quantization * entry2->value0;
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value
rowptr[column] = SATURATE(value);
column += entry2->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If two magnitudes are decoded at the second step
else {
column += entry1->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
// Both values fit in the current row
value = quantization * entry2->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry2->value1;
rowptr[column++] = SATURATE(value);
}
else {
// The pair straddles a row boundary: first value ends this row,
// second value starts the next row
value = quantization * entry2->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry2->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
column = 0;
rowptr[column++] = SATURATE(value);
}
}
}
// If only one magnitude is decoded at the first step
else if(entry1->value1 == 0) {
// Undo quantization and scaling
value = quantization * entry1->value0;
column += entry1->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
// If no magnitude is decoded at the second step
if(entry2->value0 == 0) {
// Combine the trailing skip of chunk 1 with the leading skip of chunk 2
column += entry1->post_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If one magnitude is decoded at the second step
else if (entry2->value1 == 0)
{
// Undo quantization and scaling
value = quantization * entry2->value0;
column += entry1->post_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
column += entry2->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If two magnitudes are decoded at the second step
else
{
column += entry1->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
// Both values fit in the current row
value = quantization * entry2->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry2->value1;
rowptr[column++] = SATURATE(value);
}
else {
// The pair straddles a row boundary
value = quantization * entry2->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry2->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
column = 0;
rowptr[column++] = SATURATE(value);
}
}
}
// If two magnitudes are decoded at the first step
else {
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
// Both values fit in the current row
value = quantization * entry1->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry1->value1;
rowptr[column++] = SATURATE(value);
}
else {
// The pair straddles a row boundary
value = quantization * entry1->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry1->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
column = 0;
rowptr[column++] = SATURATE(value);
}
// If two magnitudes are decoded at the first step
// then at most one more magnitude can be decoded at the second step
assert(entry2->value1 == 0);
// If no magnitude is decoded at the second step
if(entry2->value0 == 0) {
column += entry2->pre_skip; // entry2->pre_skip <=4 must be true
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If one magnitude is decoded at the second step
else {
column += entry2->pre_skip; // must be a small zero run
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Undo quantization and scaling
value = quantization * entry2->value0;
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
column += entry2->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
}
}
}
#endif
#if 0 //dan20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
//
// Each byte is split into two 4-bit chunks that are decoded sequentially
// (unlike DecodeBandFSMCombined, output is written after EACH chunk).
// Decoded magnitudes are saturated to 8-bit signed pixels and written
// directly into the image rows; zero runs advance 'column' and wrap to the
// next row when they pass 'width'. On BAND_END_TRAILER all remaining rows
// of the subband are zeroed before returning true.
//
// NOTE(review): this function is compiled out (#if 0, "not used") and kept
// for reference only.
// NOTE(review): no quantization multiply here — values are stored as decoded
// (8-bit signed output variant).
bool DecodeBandFSM8s(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int index, byte;
FSMENTRY *entry;
PIXEL8S *rowptr = image;
int column = 0;
int32_t value;
PIXEL8S *maxptr;
int length = width * sizeof(PIXEL8S);
//ROI roi = {width, 1};
// This version of Huffman decoder assumes that one byte
// is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL8S);
// Compute the address of the row after the last row in the band
maxptr = rowptr + height * pitch;
// Round up the row length (in bytes) to a multiple of 16 bytes
length = ALIGN16(length);
// Zero the first output row (later rows are zeroed as the scan reaches them)
ZeroHighPassRow((PIXEL *)rowptr, length);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk (high nibble)
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER) {
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there is only one decoded magnitude value
else if(entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE8S(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
// Both values fit in the current row
value = entry->value0;
rowptr[column++] = SATURATE8S(value);
value = entry->value1;
rowptr[column++] = SATURATE8S(value);
}
else {
// The pair straddles a row boundary: first value ends this row,
// second value starts the next row
value = entry->value0;
rowptr[column] = SATURATE8S(value);
value = entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
column = 0;
rowptr[column++] = SATURATE8S(value);
}
}
// decode the second 4-bit chunk (low nibble)
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE8S(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
// Both values fit in the current row
value = entry->value0;
rowptr[column++] = SATURATE8S(value);
value = entry->value1;
rowptr[column++] = SATURATE8S(value);
}
else {
// The pair straddles a row boundary
value = entry->value0;
rowptr[column] = SATURATE8S(value);
value = entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
column = 0;
rowptr[column++] = SATURATE8S(value);
}
}
}
}
#endif
// same as DecodeBandFSM8sNoGap but output to 16bit data
//
// Two-pass variant: the subband is zeroed up front, then the bitstream is
// decoded twice over the same pixel area. The first pass stores the decoded
// magnitudes directly (rowptr[0] = value); the second pass ORs a second set
// of decoded magnitudes shifted left by 8 bits into the same positions
// (rowptr[0] |= value << 8), combining two coding passes into one 16-bit
// coefficient. Between passes the stream is byte/tag aligned and advanced
// by 4 bytes — presumably skipping a band trailer tag-value pair between
// the two encoded passes; confirm against the encoder.
//
// The "NoGap" entry format packs both skips into one field: the low 12 bits
// of pre_post_skip are the pre-skip and the high 4 bits the post-skip, and
// rowptr walks the subband as a flat array (rows are contiguous after
// pitch /= sizeof(PIXEL16S), so no per-row wrap logic is needed).
//
// Returns true when the band end trailer is decoded (or, when ERROR_TOLERANT,
// after rewinding the stream and skipping the subband on decode overrun).
//
// NOTE(review): 'quant' is accepted but never used in the visible code.
// NOTE(review): rowptr is declared PIXEL* but assigned (PIXEL16S *)image at
// the second pass — assumes PIXEL and PIXEL16S are compatible 16-bit types;
// confirm the typedefs.
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant)
{
int index, byte;
FSMENTRY *entry;
PIXEL *rowptr = (PIXEL *)image;
PIXEL16S *bandendptr;
int value;
#if ERROR_TOLERANT
uint8_t *startCurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;
#endif
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
if (image == NULL) {
return false;
}
// Reset the decoder
ResetFSM(fsm);
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
// First pass: decode runs and magnitude values (low part of each coefficient)
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk (high nibble)
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Proceed to the second pass if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
goto SecondPass;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs (low 12 bits = pre-skip)
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance (high 4 bits = post-skip)
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk (low nibble)
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Proceed to the second pass if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
goto SecondPass;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
SecondPass:
// Restart at the top of the subband for the high-byte pass
rowptr = (PIXEL16S *)image;
AlignBits(stream);
AlignBitsTag(stream);
// Skip 4 bytes between the passes (presumably a band trailer tag-value
// pair written by the encoder — TODO confirm)
stream->lpCurrentWord += 4;
stream->nWordsUsed -= 4;
// Second pass: decode again and merge into the high byte of each coefficient
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Merge the first decoded magnitude into the high byte
value = entry->value0;
rowptr[0] |= value << 8;
// Merge the second decoded magnitude into the high byte
value = entry->value1;
rowptr[1] |= value << 8;
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Merge the first decoded magnitude into the high byte
value = entry->value0;
rowptr[0] |= value << 8;
// Merge the second decoded magnitude into the high byte
value = entry->value1;
rowptr[1] |= value << 8;
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Decode overran the band or ran out of data: recover by rewinding
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
// Same as DecodeBandFSM8sNoGap but output to 16bit data
//
// Single-pass "NoGap" decoder: the subband is zeroed up front and rowptr
// walks it as a flat array (pitch converted to pixels, rows contiguous),
// so no per-row wrap logic is needed. Each FSM entry packs both skips into
// pre_post_skip: low 12 bits = pre-skip, high 4 bits = post-skip.
//
// Two decode loops are used:
// 1. A fast loop that reads bytes straight from stream->lpCurrentWord via a
//    local pointer (no per-byte stream bookkeeping, no band-end check) and
//    unconditionally stores both magnitudes. It runs only while rowptr is
//    more than 500 pixels from the band end, so the unchecked writes
//    (rowptr[0], rowptr[1]) cannot overrun the band. The stream position is
//    resynchronized from 'offset' when the loop exits.
// 2. A careful loop using GetFastByte that checks for BAND_END_TRAILER,
//    stores only non-zero magnitudes, and (when ERROR_TOLERANT) stops if
//    the band end is passed or data runs out.
//
// Returns true when the band end trailer is decoded (or, when ERROR_TOLERANT,
// after rewinding the stream and skipping the subband on decode overrun).
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile)
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch)
#endif
{
int index, byte;
FSMENTRY *entry;
FSMENTRYFAST *entryfast;
PIXEL16S *rowptr = image;
PIXEL16S *bandendptr;
PIXEL16S *fastendptr;
int32_t value;
uint8_t *startCurrentWord = stream->lpCurrentWord;
uint8_t *CurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;
ptrdiff_t offset;
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
#if (0 && DEBUG)
DebugOutputBitstreamPosition(stream);
DebugOutputBitstreamBytes(stream, 16);
#endif
// Reset the decoder
ResetFSM(fsm);
#if (0 && DEBUG)
DebugOutputFSM(fsm);
#endif
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
//memset(rowptr, 0, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
// Stop the fast loop 500 pixels before the band end so its unchecked
// stores cannot write past the subband
fastendptr = bandendptr;
fastendptr -= 500;
// Fast loop: no band-end check, bytes read directly from the buffer
while(rowptr < fastendptr)
{
// Read a byte from the bitstream
byte = *CurrentWord++;
// Decode the first 4-bit chunk (high nibble)
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntryFast(fsm, index, entryfast);
PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif
// Set the pointer to the next state
UpdateFSM(fsm, (int)entryfast->next_state);
// Skip the decoded zero runs (low 12 bits = pre-skip)
rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];
// Store both decoded magnitudes with a single 32-bit write
*((uint32_t *)rowptr) = entryfast->values;
// Skip the appropriate distance (high 4 bits = post-skip)
rowptr = &rowptr[entryfast->pre_post_skip >> 12];
// decode the second 4-bit chunk (low nibble)
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntryFast(fsm, index, entryfast);
PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif
// set the pointer to the next state
UpdateFSM(fsm, (int)entryfast->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];
// Store both decoded magnitudes with a single 32-bit write
*((uint32_t *)rowptr) = entryfast->values;
// Skip the decoded zero runs
rowptr = &rowptr[entryfast->pre_post_skip >> 12];
}
// Resynchronize the stream position with the bytes consumed by the fast loop
offset = CurrentWord - startCurrentWord;
stream->lpCurrentWord += offset;
stream->nWordsUsed -= (int)offset;
// Careful loop: decode the remainder with band-end and data checks
#if ERROR_TOLERANT
while(bandendptr >= rowptr)
#else
for (;;)
#endif
{
#if (0 && DEBUG)
if (!(rowptr < bandendptr)) {
return true;
}
#endif
#if (0 && DEBUG)
PrintBitstreamPosition(stream, logfile);
#endif
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntry(fsm, index, entry);
PrintFSMEntry(fsm, index, entry, logfile);
#endif
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude (skip the store if zero;
// the subband is already zeroed)
if ((value = entry->value0)) {
rowptr[0] = value;//SATURATE(value);
}
// Write down the second decoded magnitude
if ((value = entry->value1)) {
rowptr[1] = value;//SATURATE(value);
}
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntry(fsm, index, entry);
PrintFSMEntry(fsm, index, entry, logfile);
#endif
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
if ((value = entry->value0)) {
rowptr[0] = value;//SATURATE(value);
}
// Write down the second decoded magnitude
if ((value = entry->value1)) {
rowptr[1] = value;//SATURATE(value);
}
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Decode overran the band or ran out of data: recover by rewinding
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
bool DecodeBandFSM16sNoGapWithPeaks(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, PIXEL *peaks, int level, int quant)
{
int index, byte;
FSMENTRY *entry;
PIXEL16S *rowptr = image;
PIXEL16S *bandendptr;
PIXEL16S *fastendptr;
int32_t value;
uint8_t *startCurrentWord = stream->lpCurrentWord;
uint8_t *CurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
// Reset the decoder
ResetFSM(fsm);
//This is been called with non-prequantized FSM
if(quant>1) level /= quant;
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
fastendptr = bandendptr;
fastendptr -= 1000;
// Decode runs and magnitude values until the entire band is decoded
while(rowptr < fastendptr)
{
// Read a byte from the bitstream
byte = *CurrentWord++;
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
stream->lpCurrentWord += ((intptr_t)CurrentWord - (intptr_t)startCurrentWord);
stream->nWordsUsed -= (int)(((intptr_t)CurrentWord - (intptr_t)startCurrentWord));
// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while(((intptr_t)bandendptr - (intptr_t)rowptr) >= 0)
#else
for (;;)
#endif
{
#if (0 && DEBUG)
if (!(rowptr < bandendptr)) {
return true;
}
#endif
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
// This version of DecodeBandFSM() assumes that the gap between width and pitch has been coded as
// zero runs. Therefore decoded magnitude values can be written down without the need to check
// if the end of a row has been reached. Hence the total number of conditionals in DecodeBandFSM
// can be significantly reduced.
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
#if !_INDIVIDUAL_ENTRY
#if 0 //dan20041030 not used
// Decode one 8-bit highpass subband using the finite state machine decoder.
// "NoGap" means the encoder coded the gap between width and pitch as zero
// runs, so decoded magnitudes can be written without per-row end checks.
// Each bitstream byte is consumed as two 4-bit chunks; each chunk yields an
// FSM table entry carrying a pre-skip (zero run), up to two magnitudes, and
// a post-skip. Returns true when the BAND_END_TRAILER entry is decoded.
// NOTE(review): this version is compiled out (#if 0 above) — kept for reference.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL8S *rowptr = image;        // Current write position in the band
    PIXEL8S *bandendptr;            // One past the last pixel in the band
    int32_t value;
#if _FSMBUFFER
    // Optional local copy of the table entry (presumably to keep it in
    // cache/registers while the table is shared — confirm intent)
    __declspec(align(32)) FSMENTRY buffer;
#endif
    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL8S);
    // Zero out the entire subband; the decoder only writes non-zero values
    ZeroHighPassRow((PIXEL *)rowptr, pitch*height);
    // This version of Huffman decoder assumes that one byte
    // is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
    bandendptr = rowptr + height * pitch;
    // Decode runs and magnitude values until the entire band is decoded
    //while (rowptr < bandendptr)
    for (;;)
    {
#if (0 && DEBUG)
        if (!(rowptr < bandendptr)) {
            return true;
        }
#endif
        // Check that the decoder has not overrun the output array
        //assert(rowptr < bandendptr);
        // Read a byte from the bitstream
        byte = GetFastByte(stream);
        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;
        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif
#if 1
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }
#endif
        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);
        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];
        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);
        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);
        // Skip the appropriate distance
        rowptr = &rowptr[entry->post_skip];
        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);
        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif
#if 1
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }
#endif
        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);
        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];
        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);
        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);
        // Skip the decoded zero runs
        rowptr = &rowptr[entry->post_skip];
    }
}
#endif
#elif _SINGLE_FSM_TABLE
// Decode one 8-bit highpass subband using a single flat FSM table.
// The table is indexed directly as (next_state_index << FSM_INDEX_SIZE) | chunk,
// avoiding the per-entry indirection of the other decoder variants.
// Each bitstream byte is consumed as two 4-bit chunks; each chunk yields an
// entry carrying a pre-skip (zero run), two magnitudes, and a post-skip.
// Returns true when the BAND_END_TRAILER entry is decoded.
// Note: width is not used — the no-gap encoding covers the full pitch.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
    int index, byte, i;
    FSMENTRY *entry,*firstentry = fsm->table->firstentry;
    PIXEL8S *rowptr = image;        // Current write position in the band
    PIXEL8S *bandendptr;            // One past the last pixel in the band
    int32_t value;
    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL8S);
    // Zero out the entire subband
    ZeroHighPassRow((PIXEL *)rowptr, pitch*height);
    // The Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
    // Compute the address just past the last pixel in the band so the
    // band-end asserts below compare against a valid bound.
    // (Fix: bandendptr was previously read uninitialized in the asserts,
    // which is undefined behavior in debug builds; the other decoder
    // variants initialize it the same way.)
    bandendptr = rowptr + height * pitch;
    // Decode runs and magnitude values until the entire band is decoded
    for (;;)
    {
        // Check that the decoder has not overrun the output array
        //assert(rowptr < bandendptr);
        // Read a byte from the bitstream
        byte = GetFastByte(stream);
        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;
        // Index into the lookup table at that state
        i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
        entry = firstentry+i; //DAN
        // Return if the subband is decoded completely
        if(entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSMIndividual(fsm);
            return true;
        }
        // set the pointer to the next state
        UpdateFSMIndividual(fsm, (entry->next_state));
        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];
        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);
        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);
        // Skip the appropriate distance
        rowptr = &rowptr[entry->post_skip];
        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);
        // Index into the lookup table at that state
        i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
        entry = firstentry+i; //DAN
        // Return if the subband is decoded completely
        if(entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSMIndividual(fsm);
            return true;
        }
        // set the pointer to the next state
        UpdateFSMIndividual(fsm, (entry->next_state));
        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];
        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);
        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);
        // Skip the decoded zero runs
        rowptr = &rowptr[entry->post_skip];
    }
}
#else
// Decode one 8-bit highpass subband using the per-state (individual entry)
// FSM tables. Each bitstream byte is consumed as two 4-bit chunks; a NULL
// table entry signals the band-end trailer. Each entry carries a pre-skip
// (zero run), two magnitudes, and a post-skip.
// Returns true when the band-end entry is decoded.
// Note: width is not used — the no-gap encoding covers the full pitch.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL8S *rowptr = image;        // Current write position in the band
    PIXEL8S *bandendptr;            // One past the last pixel in the band
    int32_t value;
    // (Removed an unused, non-portable `__declspec(align(4)) FSMENTRY buffer;`
    // local — nothing in this variant copies entries into a local buffer.)
    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL8S);
    // zero out the entire subband
    ZeroHighPassRow((PIXEL *)rowptr, pitch*height);
    // The Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
    bandendptr = rowptr + height * pitch;
    // Decode runs and magnitude values until the entire band is decoded
    for (;;)
    {
#if (0 && DEBUG)
        if (!(rowptr < bandendptr)) {
            return true;
        }
#endif
        // Read a byte from the bitstream
        byte = GetFastByte(stream);
        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;
        // Index into the lookup table at that state
        entry = GetFSMTableEntryIndividual(fsm, index);
        // Return if the subband is decoded completely
        if(entry == NULL)
        {
            assert(rowptr <= bandendptr);
            ResetFSMIndividual(fsm);
            return true;
        }
        // Set the pointer to the next state
        UpdateFSMIndividual(fsm, (entry->next_state));
        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];
        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);
        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);
        // Skip the appropriate distance
        rowptr = &rowptr[entry->post_skip];
        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);
        // Index into the lookup table at that state
        entry = GetFSMTableEntryIndividual(fsm, index);
        // Return if the subband is decoded completely
        if (entry == NULL)
        {
            assert(rowptr <= bandendptr);
            ResetFSMIndividual(fsm);
            return true;
        }
        // Set the pointer to the next state
        UpdateFSMIndividual(fsm, (entry->next_state));
        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];
        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);
        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);
        // Skip the decoded zero runs
        rowptr = &rowptr[entry->post_skip];
    }
}
#endif
// Decode the highpass band coefficients but do not write them out - used in SIF mode
// Decode the highpass band coefficients but do not write them out - used in SIF mode.
// Runs the FSM over the bitstream purely to advance the decoder state and the
// stream position to the end of the band; no pixels are touched.
// Returns true when the BAND_END_TRAILER entry is decoded.
bool SkipBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
    int index, byte;
    FSMENTRY *entry;
    // The output image and its dimensions are intentionally unused: this
    // routine only consumes bitstream bytes. The parameters are retained so
    // the signature matches the decoding variants. (Also removed the dead
    // `pitch /= sizeof(PIXEL8S);` — pitch was never read afterwards.)
    (void)image;
    (void)width;
    (void)height;
    (void)pitch;
    // The Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
    // Decode runs and magnitude values until the entire band is decoded
    for (;;)
    {
        // Read a byte from the bitstream
        byte = GetFastByte(stream);
        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;
        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER) {
            ResetFSM(fsm);
            return true;
        }
        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);
        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);
        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER) {
            ResetFSM(fsm);
            return true;
        }
        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);
    }
}
#if _TIMING
extern TIMER tk_fastruns;
#endif
#if 0 //dan20041030 not used
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
// Decode one 8-bit highpass band of a wavelet using the FSM runs decoder.
// Validates the wavelet and FSM, selects the active codebook's FSM, then
// delegates the actual bit-level decoding to DecodeBandFSM8sNoGap.
// On failure sets decoder->error to CODEC_ERROR_RUN_DECODE and returns false.
// NOTE(review): this routine is compiled out (#if 0 above) — kept for reference.
bool DecodeFastRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                         int band_index, int width, int height)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;
    FILE *logfile = decoder->logfile;
    int result;
    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026
    // All rows are treated as one long row that covers the entire band
    int size = fsm->table.num_states;
    PIXEL *rowptr;
    //int row = 0;
    int pitch;
    int pixel_type = wavelet->pixel_type[band_index];
    decoder->codec.active_codebook = 0; // reset CODEC state
    // Must have a valid wavelet
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;
    //Must have a valid FSM
    assert(fsm != NULL);
    if(fsm == NULL) return false;
    assert(size > 0);
    if (size == 0) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
    // Check if the band is intended for 8-bit pixels
    assert(pixel_type == PIXEL_TYPE_8S);
    START(tk_fastruns);
    rowptr = (PIXEL *)wavelet->band[band_index];
    pitch = wavelet->pitch8s; // Use the 8-bit pitch
    //pitch = wavelet->pitch;
    // The finite state machine does not support a marker at the end of rows
#if RUNS_ROWEND_MARKER
    assert(0);
#endif
    // Get one byte from the bitstream and decode 4 bits at a time
    result = DecodeBandFSM8sNoGap(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch);
    assert(result == true);
    if (result != true) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
#if (0 && DEBUG && _WIN32)
    _CrtCheckMemory();
#endif
#if (0 && DEBUG)
    if (logfile)
        DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif
    // Label commented out to match the sibling routines; nothing jumps here
    // (an active label with no goto draws an unused-label warning)
    //end:
    STOP(tk_fastruns);
    return true;
}
#endif
#if _DEQUANTIZE_IN_FSM
// Restore the FSM magnitude entries from the saved (unscaled) copy in
// fsm->restoreFSM, undoing a previous DeQuantFSM scaling pass.
// The quant parameter is retained for interface compatibility with callers
// (DeQuantFSM passes fsm->LastQuant) but the restore does not need it —
// values are copied back verbatim rather than divided.
void ReQuantFSM(FSM *fsm, int quant)
{
    int count = 0;
    int i, j;
    short *restore = &fsm->restoreFSM[0];
#if !_INDIVIDUAL_ENTRY
    // Flat table layout: num_states rows of (1 << FSM_INDEX_SIZE) entries
    for (i = 0; i < fsm->table.num_states; i++)
    {
        FSMENTRY *entry = fsm->table.entries[i];
        for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
        {
            entry[j].value0 = restore[count++];
            entry[j].value1 = restore[count++];
        }
    }
#else
    // Individual-entry layout: sparse array of entry pointers, NULL = absent.
    // Fix: was `fsm_table.entries_ind[i]` (undeclared identifier — would not
    // compile with _INDIVIDUAL_ENTRY); the correct access is through fsm,
    // matching the identical loop in DeQuantFSM.
    for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
    {
        FSMENTRY *entry = fsm->table.entries_ind[i];
        if(entry)
        {
            entry->value0 = restore[count++];
            entry->value1 = restore[count++];
        }
    }
#endif
}
// Scale the FSM magnitude entries by the band's quantization factor so that
// dequantization happens for free inside the FSM lookup during decoding.
// On the first call the pristine values are saved into fsm->restoreFSM; if a
// different quant was applied previously, ReQuantFSM restores the pristine
// values before the new scale is applied. A no-op when quant is unchanged.
void DeQuantFSM(FSM *fsm, int quant)
{
    int i, j;
    // Undo any previous scaling with a different factor
    if(fsm->LastQuant > 1 && fsm->LastQuant != quant)
    {
        ReQuantFSM(fsm, fsm->LastQuant);
    }
    else if(fsm->LastQuant == quant)
    {
        // Table already scaled by this factor — nothing to do
        return;
    }
    // First use: snapshot the unscaled entry values so they can be restored
    if(fsm->InitizedRestore == 0)
    {
        short *restore = &fsm->restoreFSM[0];
        int count = 0;
#if !_INDIVIDUAL_ENTRY
        for (i = 0; i < fsm->table.num_states; i++)
        {
            FSMENTRY *entry = fsm->table.entries[i];
            for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
            {
                restore[count++] = entry[j].value0;
                restore[count++] = entry[j].value1;
            }
        }
#else
        for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
        {
            FSMENTRY *entry = fsm->table.entries_ind[i];
            if(entry)
            {
                restore[count++] = entry->value0;
                restore[count++] = entry->value1;
            }
        }
#endif
        fsm->InitizedRestore = 1;
    }
    // Apply the new scale factor to every entry. value0 is left alone at or
    // above 0x7ff0 so sentinel codes (e.g. the band end trailer) keep their
    // exact values; value1 is always scaled.
#if !_INDIVIDUAL_ENTRY
    for (i = 0; i < fsm->table.num_states; i++)
    {
        FSMENTRY *entry = fsm->table.entries[i];
        for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
        {
            if(entry[j].value0 < 0x7ff0) // band end trailer
                entry[j].value0 *= quant;
            entry[j].value1 *= quant;
        }
    }
#else
    for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
    {
        FSMENTRY *entry = fsm->table.entries_ind[i];
        if(entry)
        {
            if(entry->value0 < 0x7ff0) // band end trailer etc
                entry->value0 *= quant;
            entry->value1 *= quant;
        }
    }
#endif
    // Remember the factor so repeat calls with the same quant are no-ops
    fsm->LastQuant = quant;
}
#endif // _DEQUANTIZE_IN_FSM
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
//dan 7-11-03
// Decode one 16-bit highpass band of a wavelet using the FSM runs decoder,
// optionally handing the work to an entropy worker thread pool.
// In the threaded path the bitstream state and band parameters are queued for
// a worker and this routine only advances the stream past the band; in the
// serial path the band is decoded inline (with peak-table substitution and
// horizontal difference decoding when enabled) and the band valid flags are
// updated. On failure sets decoder->error = CODEC_ERROR_RUN_DECODE and
// returns false.
bool DecodeFastRunsFSM16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                          int band_index, int width, int height, int threading)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int result = true;
    int quant = wavelet->quantization[band_index];
    // Capture the active codebook before the codec state is reset below
    int active_codebook = decoder->codec.active_codebook;
    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm[active_codebook];
    int size;
    PIXEL *rowptr;
    //int row = 0;
    int pitch;
    CODEC_STATE *codec = &decoder->codec;
    //int channel = codec->channel;
    //int subband = codec->band.subband;
    //int num_subbands = codec->num_subbands;
    //int pixel_type = wavelet->pixel_type[band_index];
    int difference_coding = decoder->codec.difference_coding;
    //int localquant = 1;
    int peaklevel = 0;
    //int peaksize = 0;
    PIXEL *peakbase = NULL;
#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Subband: %d, active_codebook: %d, difference_coding: %d\n",
            subband, decoder->codec.active_codebook, difference_coding);
    }
#endif
    decoder->codec.active_codebook = 0; // reset CODEC state
    decoder->codec.difference_coding = 0; //reset state for next subband
    // Must have a valid wavelet
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;
    //Must have a valid FSM
    assert(fsm != NULL);
    if(fsm == NULL) return false;
    // All rows are treated as one long row that covers the entire band
    size = fsm->table.num_states;
    assert(size > 0);
    if (size == 0) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
    // Check that the band holds 16-bit pixels (comment above says 8-bit,
    // but the assert and the PIXEL16S casts below show 16-bit is intended)
    assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);
    START(tk_fastruns);
    rowptr = (PIXEL *)wavelet->band[band_index];
    //pitch = wavelet->pitch8s; // Use the 8-bit pitch
    pitch = wavelet->pitch;
    // Peak table: coefficients above peaklevel are replaced from this list
    peaklevel = codec->peak_table.level;
    peakbase = codec->peak_table.base;
#if _THREADED
    // Only thread if the pool actually has more than one worker
    threading = decoder->entropy_worker_new.pool.thread_count > 1 ? threading : 0;
    if(threading)
    {
        decoder->entropy_worker_new.threads_used = 1;
        {
            //int start = stream->nWordsUsed;
            int end;
            struct entropy_data_new *data;
            int next_queue_num = decoder->entropy_worker_new.next_queue_num++;
            // Queue a snapshot of the bitstream and band parameters for the worker
            data = &decoder->entropy_worker_new.entropy_data[next_queue_num];
            memcpy(&data->stream,stream, sizeof(BITSTREAM));
            data->rowptr = rowptr;
            data->width = width;
            data->height = height;
            data->pitch = pitch;
            data->peaks = peakbase;
            data->level = peaklevel;
            data->quant = quant;
            data->wavelet = wavelet;
            data->band_index = band_index;
            data->active_codebook = active_codebook;
            data->difference_coding = difference_coding;
            // Start only a particular threadid
            if(next_queue_num == 0)
            {
                ThreadPoolSetWorkCount(&decoder->entropy_worker_new.pool, 1);
#if _DELAYED_THREAD_START==0
                ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
            }
            else
            {	// Set the work count to the number of rows to process
                ThreadPoolAddWorkCount(&decoder->entropy_worker_new.pool, 1);
            }
            // The worker will decode the queued snapshot; here we only need to
            // move this stream past the band. Prefer the optional subband-size
            // tag (written just before the band data) to jump directly; fall
            // back to scanning for the band-end marker word 0x00003800.
            {
                unsigned short tag = *(stream->lpCurrentWord-8) << 8;
                if(tag == (unsigned short)OPTIONALTAG(CODEC_TAG_SUBBAND_SIZE))
                {
                    int chunksize;
                    int value = *(stream->lpCurrentWord-6) << 8;
                    value |= *(stream->lpCurrentWord-5);
                    tag |= *(stream->lpCurrentWord-7);
                    tag = NEG(tag);
                    // Chunk size is 24 bits: low 16 from the value field,
                    // high 8 from the tag; units are 32-bit words
                    chunksize = value;
                    chunksize &= 0xffff;
                    chunksize += ((tag&0xff)<<16);
                    chunksize *= 4;
                    chunksize -= 8;
                    {
                        // Verify the computed jump really lands on the
                        // band-end marker before trusting the size field
                        uint32_t *ptr = (uint32_t *)stream->lpCurrentWord;
                        ptr += (chunksize>>2);
                        if(*ptr != 0x00003800) // bandend
                        {
                            goto continuesearch;
                        }
                    }
                    stream->lpCurrentWord += chunksize;
                    stream->nWordsUsed -= chunksize;
                    end = stream->nWordsUsed;
                }
                else
                {
continuesearch:
                    // No (or untrustworthy) size tag: scan word by word
                    while(*((uint32_t *)stream->lpCurrentWord) != 0x00003800) // bandend
                    {
                        stream->lpCurrentWord += 4;
                        stream->nWordsUsed -= 4;
                    }
                    end = stream->nWordsUsed;
                }
            }
        }
    }
    else
#endif // _THREADED
    {
        // Serial path: fold the quantization factor into the FSM tables,
        // then decode the band inline
        DeQuantFSM(fsm, quant);
        if (peaklevel)
        {
            result = DecodeBandFSM16sNoGapWithPeaks(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, peakbase, peaklevel, 1);
        }
        else
        {
#if _DEBUG
            // NOTE(review): logfile is declared under `#if (1 && DEBUG)` but
            // used here under `#if _DEBUG` — confirm both macros are defined
            // together or this branch will not compile
            result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, logfile);
#else
            result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch);
#endif
        }
        // Undo horizontal difference coding: each coefficient was encoded as
        // the difference from its left neighbor
        if(difference_coding)
        {
            int x,y;
            PIXEL *line = rowptr;
            for(y=0;y<height;y++)
            {
                for(x=1;x<width;x++)
                {
                    line[x] += line[x-1];
                }
                line += pitch/2;
            }
        }
        if (result)
        {
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, wavelet, band_index);
        }
    }
    assert(result == true);
    if (result != true) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
    //end:
    STOP(tk_fastruns);
    return true;
}
// Skip over one FSM-coded 8-bit band without writing any coefficients.
// Validates the wavelet and the active codebook's FSM, then runs SkipBandFSM
// to advance the bitstream to the end of the band. On failure sets
// decoder->error = CODEC_ERROR_RUN_DECODE and returns false.
bool SkipFastRunsFSM(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                     int band_index, int width, int height)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    // Look up the FSM for the active codebook before resetting the codec state
    FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026
    // The whole band is treated as one long run of coefficients
    int state_count = fsm->table.num_states;
    PIXEL *band_data;
    int band_pitch;
    int status;
    decoder->codec.active_codebook = 0; // reset CODEC state
    // Guard against a missing wavelet
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;
    // Guard against a missing FSM
    assert(fsm != NULL);
    if(fsm == NULL) return false;
    // An empty state table means the FSM was never initialized
    assert(state_count > 0);
    if (state_count == 0) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
    // This routine only handles bands stored as 8-bit pixels
    assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_8S);
    START(tk_fastruns);
    band_data = (PIXEL *)wavelet->band[band_index];
    band_pitch = wavelet->pitch8s; // Use the 8-bit pitch
    // Row-end markers are not supported by the finite state machine
#if RUNS_ROWEND_MARKER
    assert(0);
#endif
#if 1 // Get one byte from the bitstream and decode 4 bits at a time
    status = SkipBandFSM(fsm, stream, (PIXEL8S *)band_data, width, height, band_pitch);
    assert(status == true);
    if (status != true) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
#endif
#if (0 && DEBUG && _WIN32)
    _CrtCheckMemory();
#endif
#if (0 && DEBUG)
    if (logfile)
        DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif
    //end:
    STOP(tk_fastruns);
    return true;
}
// The third version is also based on the finite state machine decoder with
// gaps between rows encoded as zero runs, but dequantization is performed as
// the highpass values are read from the bitstream and placed into a row buffer.
// The highpass values are not written into the wavelet highpass band.
// Eventually this routine will be merged into the routine DecodeTemporalBand8s
// since this routine contains code specific to the inverse temporal transform
// and DecodeTemporalBand8s has become a shell.
#if 0
bool DecodeBandRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height,
IMAGE *frame0, IMAGE *frame1)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
FILE *logfile = decoder->logfile;
int result;
// Get the pointer to the finite state machine
FSM *fsm = &decoder->fsm;
// All rows are treated as one long row that covers the entire band
int size = fsm->table.num_states;
PIXEL *lowpass = wavelet->band[0];
int lowpass_pitch = wavelet->pitch;
//PIXEL8S *rowptr;
int row = 0;
int pitch;
int row_width; // Width of the encoded row of highpass coefficients
PIXEL *even = frame0->band[0];
PIXEL *odd = frame1->band[0];
int even_pitch = frame0->pitch;
int odd_pitch = frame1->pitch;
int pixel_type = wavelet->pixel_type[band_index];
int quantization = wavelet->quantization[band_index];
PIXEL *buffer;
size_t buffer_size;
int index, byte;
FSMENTRY *entry;
int column = 0;
int32_t value;
int buffer_row_size;
PIXEL *highpass;
// Check that the wavelet into which the band will be decoded is valid
assert(wavelet != NULL);
if (wavelet == NULL) return false;
// Check that the finite state machine is valid
assert(fsm != NULL);
if (fsm == NULL) return false;
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check that the band was encoded using 8-bit signed coefficients
assert(pixel_type == PIXEL_TYPE_8S);
pitch = wavelet->pitch8s; // Use the pitch for 8-bit packed rows
// Get the buffer for storing one row of dequantized highpass coefficients
buffer = (PIXEL *)decoder->buffer;
buffer_size = decoder->buffer_size;
// The finite state machine does not support a marker at the end of each row
assert(RUNS_ROWEND_MARKER == 0);
/***** Start of code included from DecodeBandFSM8s() *****/
// Check that one byte can be processes as two 4-bit nibbles
assert(BITSTREAM_WORD_SIZE == (2 * FSM_INDEX_SIZE));
// Check that the bitstream buffer is empty
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL8S);
buffer_row_size = pitch * sizeof(PIXEL);
lowpass_pitch /= sizeof(PIXEL);
even_pitch /= sizeof(PIXEL);
odd_pitch /= sizeof(PIXEL);
// Compute the address of the row after the last row in the band
//maxptr = rowptr + height * pitch;
// Round up the row length (in bytes) to a multiple of 16 bytes
//row_size = ALIGN16(row_size);
// Check that the buffer is large enough to hold one row
//assert(buffer_size >= row_size);
assert(buffer_size >= buffer_row_size);
// Use the buffer for the row or highpass coefficients
highpass = buffer;
#if 1
// The row spans the allocated width (pitch) of the band in no gap mode
row_width = pitch;
#else
// For debugging
row_width = wavelet->encoded_pitch/sizeof(PIXEL8S);
#endif
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
// Decode zero runs and magnitude values (with appended sign bit)
// until the marker for the band end trailer has been decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
/***** Decode the first 4-bit nibble *****/
// Decode the first 4-bit nibble
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Process the rest of the subband
ZeroHighPassRow(highpass, buffer_row_size);
while (++row < height)
{
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
}
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < row_width);
// Dequantize the value and store it in the highpass row buffer
highpass[column] = quantization * value;
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < row_width);
if (column < (row_width - 1)) {
// Store both values in the current row
highpass[column++] = quantization * entry->value0;
highpass[column++] = quantization * entry->value1;
}
else {
value = entry->value0;
highpass[column] = quantization * value;
value = entry->value1;
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
column = 0;
highpass[column++] = quantization * value;
}
}
/***** Decode the second 4-bit nibble *****/
// Decode the second 4-bit nibble
index = byte & FSM_INDEX_MASK;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Process the rest of the subband
ZeroHighPassRow(highpass, buffer_row_size);
while (++row < height)
{
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
}
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < row_width);
highpass[column] = quantization * value;
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < row_width);
if (column < (row_width - 1)) {
// Store both highpass values in the current row
highpass[column++] = quantization * entry->value0;
highpass[column++] = quantization * entry->value1;
}
else {
highpass[column] = quantization * entry->value0;
value = entry->value1;
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
column = 0;
highpass[column++] = quantization * value;
}
}
}
/***** End of the code included from DecodeBandFSM8s() *****/
#if 0
assert(result == true);
if (result != true) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
#endif
#if (0 && DEBUG && _WIN32)
_CrtCheckMemory();
#endif
#if (0 && DEBUG)
if (logfile)
DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif
#if 0
end:
return true;
#endif
}
#endif
/***** End of the code for the finite state machine decoder *****/
#if 1
// The second version applies the horizontal inverse filters row by row, so the
// memory access pattern is more efficient. The lowpass and highpass temporal
// coefficients for each row are inverted and packed into the output in one pass.
//
// Apply the inverse horizontal-temporal transform and pack the output into a buffer.
//
// Parameters:
//   transform      per-channel transforms holding the frame wavelets
//   frame_index    index of the wavelet (frame) to invert within each transform
//   num_channels   number of color channels (0 < num_channels <= TRANSFORM_MAX_CHANNELS)
//   output         packed interlaced output buffer (YUYV or UYVY, per frame->format)
//   output_pitch   output pitch in bytes (even/odd field rows are written a field apart)
//   frame          dimensions and output format of the reconstructed frame
//   scratch        scratch space for intermediate rows of temporal coefficients
//   chroma_offset  offset applied to the chroma values during packing
//   precision      CODEC_PRECISION_10BIT or CODEC_PRECISION_8BIT
void TransformInverseFrameToYUV(TRANSFORM *transform[], int frame_index, int num_channels,
								uint8_t *output, int output_pitch, FRAME_INFO *frame,
								const SCRATCH *scratch, int chroma_offset, int precision)
{
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];
	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
	// Pointers to the rows in the temporal wavelet for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];
	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
#if DEBUG
	size_t buffer_size = scratch->free_size;
#endif
	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;
	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	int field_pitch = 2 * output_pitch;
	int output_width;
	int channel;
	int row;
	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
	// Check that the buffer is large enough
#if DEBUG
	assert((2 * num_channels * temporal_row_size) <= buffer_size);
#endif
	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
		int static count = 0;
		if (count < 20) {
			char label[_MAX_PATH];
			int i;
			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);
			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif
		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];
		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);
		// Compute the 8-bit pitch in units of pixels
		horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);
		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;
		//TODO: Need to recode the buffer allocations using the scratch space API
		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}
	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
		// NOTE(review): line_buffer lies beyond the (2 * num_channels) rows checked by the
		// buffer-size assertion above -- confirm the scratch area reserves the extra rows.
		PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];
			//int pitch8s = horizontal_pitch8s[channel];
			// Invert the horizontal transform applied to the temporal lowpass row
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
												  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												  temporal_lowpass[channel],
												  horizontal_width[channel],
												  (PIXEL *)line_buffer);
			// Invert the horizontal transform applied to the temporal highpass row
			//DAN20051004 -- possible reversiblity issue
			//InvertHorizontalRow8sBuffered //----------------------- Maybe bad
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_highlow[channel], highlow_quantization[channel],
												  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
												  temporal_highpass[channel],
												  horizontal_width[channel],
												  (PIXEL *)line_buffer);
			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;
		}
		// The output width is twice the width of the wavelet bands
		output_width = 2 * horizontal_width[0];
		// Adjust the frame width to fill to the end of each row
		//frame_width = output_pitch / 2;
		if (precision == CODEC_PRECISION_10BIT)
		{
			// Invert the temporal bands from all channels and pack output pixels
			switch (frame->format)
			{
			// Need to reduce the resolution from 10 bits to 8 bits during the inverse
			case DECODED_FORMAT_YUYV:
				InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
												 output, output_pitch, output_width, frame_width,
												 chroma_offset);
				break;
			case DECODED_FORMAT_UYVY:
				InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
												  output, output_pitch, output_width, frame_width,
												  chroma_offset);
				break;
			default:
				assert(0);
				break;
			}
		}
		else	// Older code for 8-bit precision
		{
			int format;
			assert(precision == CODEC_PRECISION_8BIT);
			switch (frame->format)
			{
			case DECODED_FORMAT_YUYV:
				format = COLOR_FORMAT_YUYV;
				break;
			case DECODED_FORMAT_UYVY:
				format = COLOR_FORMAT_UYVY;
				break;
			default:
				// Fix: format was read uninitialized (undefined behavior) for any
				// other decoded format.  Assert in debug builds and fall back to YUYV.
				assert(0);
				format = COLOR_FORMAT_YUYV;
				break;
			}
			// Invert the temporal bands from all channels and pack output pixels
			InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
										output, output_pitch, output_width, frame_width,
										chroma_offset, format);
		}
		// Advance to the next row in the packed output image
		output += field_pitch;
	}
}
#endif
#if _INTERLACED_WORKER_THREADS
// Worker-thread version of the inverse horizontal-temporal transform: each call
// repeatedly claims a row index from the shared counter (guarded by the decoder's
// critical section) and inverts that row for all channels, until the row semaphore
// is exhausted.  Output is packed interlaced YUYV or UYVY.
//
// Parameters:
//   decoder        decoder state (transforms, scratch space, worker synchronization)
//   thread_index   index of this worker thread (used to partition the scratch buffer)
//   frame_index    index of the wavelet (frame) to invert within each transform
//   num_channels   number of color channels (0 < num_channels <= TRANSFORM_MAX_CHANNELS)
//   output         packed interlaced output buffer
//   output_pitch   output pitch in bytes
//   frame          dimensions and output format of the reconstructed frame
//   chroma_offset  offset applied to the chroma values during packing
//   precision      CODEC_PRECISION_10BIT or CODEC_PRECISION_8BIT
void TransformInverseFrameSectionToYUV(DECODER *decoder, int thread_index, int frame_index, int num_channels,
									   uint8_t *output, int output_pitch, FRAME_INFO *frame,
									   int chroma_offset, int precision)
{
	FILE *logfile = decoder->logfile;
	TRANSFORM **transform = decoder->transform;
	const SCRATCH *scratch = &decoder->scratch;
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];
	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
	// Pointers to the rows in the temporal wavelet for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];
	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;
	uint8_t *output_row_ptr = output;
	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;
	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	int field_pitch = 2 * output_pitch;
	int output_width;
	int channel;
	int row;
	HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
	int return_value;
	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
	// Divide the buffer space between the four threads
	buffer_size /= 4;
	buffer += buffer_size * thread_index;
	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
	// Check that the buffer is large enough
	assert((2 * num_channels * temporal_row_size) <= buffer_size);
	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
		int static count = 0;
		if (count < 20) {
			char label[_MAX_PATH];
			int i;
			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);
			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif
		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];
		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);
		// Compute the 8-bit pitch in units of pixels
		horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);
		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;
		//TODO: Need to recode the buffer allocations using the scratch space API
		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}
#if (0 && DEBUG)
	if (logfile) {
		// Fix: the pointer was previously printed with a %d conversion (undefined behavior)
		fprintf(logfile, "Output buffer: 0x%p\n", output);
	}
#endif
/*	if (thread_index == 0)
	{
		row = 0;
		row_step = 1;
	}
	else if (thread_index == 1)
	{
		row = half_height - 1;
		row_step = -1;
		// Move to the bottom of the transform and process moving up
		for (channel = 0; channel < num_channels; channel++)
		{
			int offset = horizontal_pitch[channel] * (half_height - 1);
			horizontal_lowlow[channel] += offset;
			horizontal_lowhigh[channel] += offset;
			horizontal_highlow[channel] += offset;
			horizontal_highhigh[channel] += offset;
			horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
			horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
		}
		output += field_pitch * (half_height - 1);
		field_pitch = NEG(field_pitch);
	}
	else
	{
		assert(0);	// what about middle threads?
	}
#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
			thread_index, row, row_step, field_pitch);
	}
#endif
*/
	// Loop until all of the rows have been processed
	for (;;)
	{
		// Wait for one row from each channel to invert the transform
		// (zero timeout: the thread exits as soon as no more rows are queued)
		return_value = WaitForSingleObject(row_semaphore, 0);
		// Determine the index of this worker thread
		if (return_value == WAIT_OBJECT_0)
		{
			// Claim the next unprocessed row under the worker lock
			if(decoder->interlaced_worker.lock_init)
			{
				EnterCriticalSection(&decoder->interlaced_worker.lock);
			}
			row = decoder->interlaced_worker.current_row++;
			if(decoder->interlaced_worker.lock_init)
				LeaveCriticalSection(&decoder->interlaced_worker.lock);
			output_row_ptr = output;
			output_row_ptr += row * 2 * output_pitch;
			// Reposition the band row pointers at the claimed row
			for (channel = 0; channel < num_channels; channel++)
			{
				int pitch = horizontal_pitch[channel];
				IMAGE *wavelet = transform[channel]->wavelet[frame_index];
				horizontal_lowlow[channel] = wavelet->band[LL_BAND];
				horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
				horizontal_highlow[channel] = wavelet->band[HL_BAND];
				horizontal_highhigh[channel] = wavelet->band[HH_BAND];
				horizontal_lowlow[channel] += pitch*row;
				horizontal_lowhigh[channel] += pitch*row;
				horizontal_highlow[channel] += pitch*row;
				horizontal_highhigh[channel] += pitch*row;
			}
		}
		if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
		{
			//PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
			PIXEL *line_buffer = (PIXEL *)(buffer + 2 * num_channels * temporal_row_size);
			// assert(0 <= row && row < half_height);
#if (0 && DEBUG)
			if (logfile) {
				// Fix: the format string had four conversions for three arguments
				fprintf(logfile, "Processing row: %d, thread index: %d, output: 0x%p\n",
					row, thread_index, output_row_ptr);
			}
#endif
			// Invert the horizontal transform applied to the temporal bands in each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				int pitch = horizontal_pitch[channel];
				//int pitch8s = horizontal_pitch8s[channel];
#if (0 && DEBUG)
				// Invert the horizontal transform by duplicating the lowpass pixels
				InvertHorizontalRowDuplicated16s(horizontal_lowlow[channel], lowlow_quantization[channel],
												 (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												 temporal_lowpass[channel], horizontal_width[channel],
												 (PIXEL *)line_buffer);
#else
				// Invert the horizontal transform applied to the temporal lowpass row
				InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
													  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
													  temporal_lowpass[channel],
													  horizontal_width[channel],
													  (PIXEL *)line_buffer);
#endif
				// Invert the horizontal transform applied to the temporal highpass row
				InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
											  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
											  temporal_highpass[channel],
											  horizontal_width[channel],
											  (PIXEL *)line_buffer);
				// Advance to the next row in each horizontal band in this channel
				//horizontal_lowlow[channel] += pitch;
				//horizontal_lowhigh[channel] += pitch;
				//horizontal_highlow[channel] += pitch;
				//horizontal_highhigh[channel] += pitch;
			}
			// The output width is twice the width of the wavelet bands
			output_width = 2 * horizontal_width[0];
			// Adjust the frame width to fill to the end of each row
			//frame_width = output_pitch / 2;
			if (precision == CODEC_PRECISION_10BIT)
			{
				// Invert the temporal bands from all channels and pack output pixels
				switch (frame->format)
				{
				// Need to reduce the resolution from 10 bits to 8 bits during the inverse
				case DECODED_FORMAT_YUYV:
					InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
													 output_row_ptr, output_pitch, output_width, frame_width,
													 chroma_offset);
					break;
				case DECODED_FORMAT_UYVY:
					InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
													  output_row_ptr, output_pitch, output_width, frame_width,
													  chroma_offset);
					break;
				default:
					assert(0);
					break;
				}
			}
			else	// Older code for 8-bit precision
			{
				int format;
				assert(precision == CODEC_PRECISION_8BIT);
				switch (frame->format)
				{
				case DECODED_FORMAT_YUYV:
					format = COLOR_FORMAT_YUYV;
					break;
				case DECODED_FORMAT_UYVY:
					format = COLOR_FORMAT_UYVY;
					break;
				default:
					// Fix: format was read uninitialized (undefined behavior) for any
					// other decoded format.  Assert in debug builds and fall back to YUYV.
					assert(0);
					format = COLOR_FORMAT_YUYV;
					break;
				}
				// Invert the temporal bands from all channels and pack output pixels
				InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
											output_row_ptr, output_pitch, output_width, frame_width,
											chroma_offset, format);
			}
			// Advance to the next row in the input transforms
			//row += row_step;
			// Advance to the next row in the packed output image
			//output += field_pitch;
		}
		else
		{
			// No more rows to process
			break;
		}
	}
#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
	}
#endif
}
#endif
//#if BUILD_PROSPECT
// Apply the inverse horizontal-temporal transform and output rows of luma and chroma.
//
// Single-threaded variant: for each pair of output rows it inverts the horizontal
// transform of the lowpass and highpass temporal bands for every channel, then
// inverts the temporal (interlaced) transform and writes 16-bit planar-per-row
// output (luma row followed by the narrower chroma rows).
//
// Parameters (active prototype in the #else branch):
//   decoder        decoder state (used here only for the decoded resolution check)
//   transform      per-channel transforms holding the frame wavelets
//   frame_index    index of the wavelet (frame) to invert within each transform
//   num_channels   number of color channels (0 < num_channels <= TRANSFORM_MAX_CHANNELS)
//   output         16-bit output buffer
//   output_pitch   output pitch in bytes
//   frame          dimensions of the reconstructed frame
//   scratch        scratch space for the intermediate rows of temporal coefficients
//   chroma_offset  offset applied to the chroma values
//   precision      encoded precision passed through to the row inversion
#if 0
void TransformInverseFrameToRow16u(TRANSFORM *transform[], int frame_index, int num_channels,
								   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
								   char *buffer, size_t buffer_size, int chroma_offset,
								   int precision)
#else
void TransformInverseFrameToRow16u(DECODER *decoder, TRANSFORM *transform[], int frame_index, int num_channels,
								   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
								   const SCRATCH *scratch, int chroma_offset,
								   int precision)
#endif
{
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	// Quantization factors
	// NOTE(review): these four arrays are initialized below but never read in this
	// routine (the InvertHorizontalRow16s calls do not take quantization arguments).
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
#if DEBUG
	size_t buffer_size = scratch->free_size;
#endif
	// Buffers for the rows in the temporal wavelet (reused for each channel)
	PIXEL *temporal_lowpass;
	PIXEL *temporal_highpass;
	int output_row_width[TRANSFORM_MAX_CHANNELS];
	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;
	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	int field_pitch = 2 * output_pitch;
	int luma_width = frame_width;
	int chroma_width = luma_width/2;
	int channel;
	int row;
#if (1 && DEBUG_ROW16U)
	PIXEL16U *output_buffer;
#endif
	// This routine should only be called to decode rows of 16-bit luma and chroma
	//assert(frame->format == DECODED_FORMAT_YR16);
	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
	// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
	// plus the buffer used by the inverse horizontal transform for its intermediate results
#if DEBUG
	assert((2 * temporal_row_size) <= buffer_size);
#endif
	// Allocate buffers for one row of lowpass and highpass temporal coefficients
	temporal_lowpass = (PIXEL *)&buffer[0];
	temporal_highpass = (PIXEL *)&buffer[temporal_row_size];
#if (1 && DEBUG_ROW16U)
	output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif
	// Initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
		int static count = 0;
		if (count < 20) {
			char label[_MAX_PATH];
			int i;
			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);
			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif
		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];
		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);
		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;
		// Compute the width of each row of output pixels
		// (the first channel is luma at full width, the rest are half-width chroma)
		output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
	}
	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
#if (1 && DEBUG_ROW16U)
		PIXEL16U *output_row_ptr = output_buffer;
		PIXEL16U *planar_output[TRANSFORM_MAX_CHANNELS];
		int planar_pitch[TRANSFORM_MAX_CHANNELS];
		ROI strip = {luma_width, 2};
		uint8_t *yuv_output = (uint8_t *)output;
		uint8_t *output1 = yuv_output;
		uint8_t *output2 = yuv_output + output_pitch;
#else
		PIXEL16U *output_row_ptr = output;
#endif
		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];
			// At half-horizontal resolution the horizontal inverse is bypassed
			if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
			{
				// Invert the horizontal transform applied to the temporal lowpass row
				BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
									   temporal_lowpass, horizontal_width[channel]);
				// Invert the horizontal transform applied to the temporal highpass row
				BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
									   temporal_highpass, horizontal_width[channel]);
			}
			else
			{
				// Invert the horizontal transform applied to the temporal lowpass row
				InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
									   temporal_lowpass, horizontal_width[channel]);
				// Invert the horizontal transform applied to the temporal highpass row
				InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
									   temporal_highpass, horizontal_width[channel]);
			}
			//***DEBUG***
			//ZeroMemory(temporal_highpass, temporal_row_size);
			//FillPixelMemory(temporal_highpass, temporal_row_size/sizeof(PIXEL), 50);
			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;
#if (1 && DEBUG_ROW16U)
			// Write the rows of 16-bit pixels to a temporary buffer
			planar_output[channel] = output_row_ptr;
			planar_pitch[channel] = output_pitch * sizeof(PIXEL);
			// Invert the temporal transform and output two rows of luma or chroma
			InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
										   planar_output[channel], planar_pitch[channel],
										   output_row_width[channel],
										   frame_width, chroma_offset, precision);
			//if (channel > 0)
			if (0)
			{
				uint8_t *output3 = (uint8_t *)planar_output[channel];
				uint8_t *output4 = (uint8_t *)output3 + planar_pitch[channel];
				int output_size = output_row_width[channel] * sizeof(PIXEL);
				int fill_value = (128 << 8);
				//ZeroMemory(output3, output_size);
				//ZeroMemory(output4, output_size);
				FillPixelMemory((PIXEL *)output3, output_row_width[channel], fill_value);
				FillPixelMemory((PIXEL *)output4, output_row_width[channel], fill_value);
			}
#else
			// Invert the temporal transform and output two rows of luma or chroma
			InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
										   output_row_ptr, output_pitch, output_row_width[channel],
										   frame_width, chroma_offset, precision);
#endif
			// Advance the output row pointer to the next channel
			output_row_ptr += output_row_width[channel];
			// Check the output row alignment
			assert(ISALIGNED16(output_row_ptr));
		}
		// Advance to the next group of rows in the output image
		output += field_pitch/sizeof(PIXEL16U);
	}
}
//#endif
#if _INTERLACED_WORKER_THREADS
void TransformInverseFrameSectionToRow16u(DECODER *decoder, int thread_index, int frame_index, int num_channels,
PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
int chroma_offset, int precision)
{
FILE *logfile = decoder->logfile;
TRANSFORM **transform = decoder->transform;
const SCRATCH *scratch = &decoder->scratch;
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
size_t buffer_size = scratch->free_size;
// Buffers for the rows in the temporal wavelet (reused for each channel)
PIXEL *temporal_lowpass;
PIXEL *temporal_highpass;
int output_row_width[TRANSFORM_MAX_CHANNELS];
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
int field_pitch = 2 * output_pitch;
int luma_width = frame_width;
int chroma_width = luma_width/2;
int channel;
int row;
HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
int return_value;
#if (1 && DEBUG_ROW16U)
PIXEL16U *output_buffer;
#endif
// This routine should only be called to decode rows of 16-bit luma and chroma
//assert(frame->format == DECODED_FORMAT_YR16);
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
#if 0
if (thread_index == 1)
{
// Skip over the buffer space used by the other thread
size_t buffer_usage = 2 * temporal_row_size;
buffer += buffer_usage;
buffer_size -= buffer_usage;
}
#else
// Divide the buffer space among the four worker threads (buffer_size /= 4 below)
buffer_size /= 4;
buffer += buffer_size * thread_index;
#endif
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
// plus the buffer used by the inverse horizontal transform for its intermediate results
assert((2 * temporal_row_size) <= buffer_size);
// Allocate buffers for one row of lowpass and highpass temporal coefficients
temporal_lowpass = (PIXEL *)&buffer[0];
temporal_highpass = (PIXEL *)&buffer[temporal_row_size];
#if (1 && DEBUG_ROW16U)
output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif
// Initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
int static count = 0;
if (count < 20) {
char label[_MAX_PATH];
int i;
sprintf(label, "Frame%d-%d-", frame_index, count);
DumpPGM(label, wavelet, NULL);
for (i = 1; i < wavelet->num_bands; i++)
{
sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
DumpBandPGM(label, wavelet, i, NULL);
}
}
count++;
#endif
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
// Compute the width of each row of output pixels
output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
}
#endif
/* if (thread_index == 0)
{
row = 0;
row_step = 1;
}
else if (thread_index == 1)
{
row = half_height - 1;
row_step = -1;
// Move to the bottom of the transform and process moving up
for (channel = 0; channel < num_channels; channel++)
{
int offset = horizontal_pitch[channel] * (half_height - 1);
horizontal_lowlow[channel] += offset;
horizontal_lowhigh[channel] += offset;
horizontal_highlow[channel] += offset;
horizontal_highhigh[channel] += offset;
horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
//horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
}
//output += field_pitch * (half_height - 1);
output += (frame_height - 1) * output_pitch/sizeof(PIXEL16U);
output_pitch = NEG(output_pitch);
field_pitch = NEG(field_pitch);
}
else
{
assert(0); // middle threads
}
*/
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
thread_index, row, row_step, field_pitch);
}
#endif
// Loop until all of the rows have been processed
for (;;)
{
PIXEL16U *output_row_ptr;
// Wait for one row from each channel to invert the transform
return_value = WaitForSingleObject(row_semaphore, 0);
// Determine the index of this worker thread
if (return_value == WAIT_OBJECT_0)
{
if(decoder->interlaced_worker.lock_init)
{
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
row = decoder->interlaced_worker.current_row++;
if(decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
output_row_ptr = output;
output_row_ptr += row * output_pitch;
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
horizontal_lowlow[channel] += pitch*row;
horizontal_lowhigh[channel] += pitch*row;
horizontal_highlow[channel] += pitch*row;
horizontal_highhigh[channel] += pitch*row;
}
}
if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
{
assert(0 <= row && row < half_height);
if(decoder->frame.resolution == DECODED_RESOLUTION_FULL)
{
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
// Advance the output row pointer to the next channel
output_row_ptr += output_row_width[channel];
}
}
else if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
// Invert the horizontal transform applied to the temporal lowpass row
BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
// Advance the output row pointer to the next channel
output_row_ptr += output_row_width[channel];
}
}
}
else
{
// No more rows to process
break;
}
}
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
}
#endif
}
#endif
#if 0
// NOTE(review): these Win32 thread entry points are compiled out (#if 0).
// Each one unpacks a parameter struct and forwards to the corresponding
// top/bottom field variant of the inverse frame transform.

// Thread entry point: forwards to TransformInverseFrameToRow16utop()
DWORD WINAPI TransformInverseFrameToRow16utopThread(LPVOID param)
{
// The layout of this struct must match the one packed by the code that
// spawns the thread (param is an opaque pointer to it)
struct data
{
TRANSFORM *transform[3];
int frame_index;
int num_channels;
uint8_t *output;
int output_pitch;
FRAME_INFO *info;
SCRATCH *scratch;
int chroma_offset;
int precision;
} *dptr;
dptr = (struct data *)param;
// Invert the frame transform for the top field and pack 16-bit unsigned rows
TransformInverseFrameToRow16utop(dptr->transform, dptr->frame_index, dptr->num_channels,
(PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info,
dptr->scratch, dptr->chroma_offset, dptr->precision);
return 0;
}
// Thread entry point: forwards to TransformInverseFrameToRow16ubottom()
DWORD WINAPI TransformInverseFrameToRow16ubottomThread(LPVOID param)
{
// Same parameter layout as the top-field thread above
struct data
{
TRANSFORM *transform[3];
int frame_index;
int num_channels;
uint8_t *output;
int output_pitch;
FRAME_INFO *info;
SCRATCH *scratch;
int chroma_offset;
int precision;
} *dptr;
dptr = (struct data *)param;
// Invert the frame transform for the bottom field and pack 16-bit unsigned rows
TransformInverseFrameToRow16ubottom(dptr->transform, dptr->frame_index, dptr->num_channels,
(PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info,
dptr->scratch, dptr->chroma_offset, dptr->precision);
return 0;
}
#endif
extern void fast_srand( int seed );
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
#if 0
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
                                   uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                   char *buffer, size_t buffer_size, int chroma_offset,
                                   int precision)
#else
// Invert the horizontal transform on the temporal lowpass/highpass bands of
// one frame wavelet per channel, then invert the temporal (interlaced)
// transform and pack the result into the output buffer.
//
// transform     - per-channel transform data structures
// frame_index   - which frame wavelet to invert
// num_channels  - number of channels (1..TRANSFORM_MAX_CHANNELS)
// output        - destination frame buffer
// output_pitch  - destination row pitch in bytes
// frame         - output frame dimensions and format
// scratch       - scratch space used for intermediate rows
// chroma_offset - chroma offset applied during encoding
// precision     - encoded precision passed to the row inverters
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
                                   uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                   const SCRATCH *scratch, int chroma_offset, int precision)
#endif
{
	// Pointers to the current row in each horizontal wavelet band, per channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch (in pixels), per channel
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];

	// Quantization factors for each band, per channel
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Use the free section of the scratch space for intermediate rows
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Buffers for one row of temporal lowpass and highpass coefficients per channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	size_t temporal_buffer_size;

#if DEBUG
	size_t yuv_row_size = frame_width * 2;
#endif
	char *yuv_buffer;
	size_t yuv_buffer_size;

	// Each loop iteration outputs two rows (one field pair)
	int field_pitch = 2 * output_pitch;

	int format = frame->format;
	bool inverted = (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32);
	int output_width;
	int channel;
	int row;

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

	// Compute the total size of the temporal rows AFTER rounding up the row
	// size so that the YUV buffer carved out below starts past the temporal
	// rows.  (Fix: this was previously computed from the unaligned row size,
	// so yuv_buffer overlapped the tail of the aligned temporal row buffers.)
	temporal_buffer_size = 2 * num_channels * temporal_row_size;

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Check that the buffer is large enough
	assert(temporal_buffer_size <= buffer_size);

	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}

	// Allocate buffer space for the intermediate YUV data
	yuv_buffer = buffer + temporal_buffer_size;
	yuv_buffer_size = buffer_size - temporal_buffer_size;
#if DEBUG
	assert(yuv_buffer_size >= 2 * yuv_row_size);
#endif

	if (inverted)
	{
		// RGB output is stored bottom-up: start at the last row and walk
		// upward by negating the pitches
		output += (frame_height - 1) * output_pitch;
		output_pitch = (- output_pitch);
		field_pitch = (- field_pitch);
	}

	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
		// NOTE(review): this scratch line starts two rows into the region
		// assigned to yuv_buffer above; presumably the row inverters and the
		// packers never use both regions at the same time -- confirm before
		// changing the buffer layout.
		PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];

			// Invert the horizontal transform applied to the temporal lowpass row
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
												  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												  temporal_lowpass[channel],
												  horizontal_width[channel],
												  (PIXEL *)line_buffer);

			// Invert the horizontal transform applied to the temporal highpass row
			InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
										  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
										  temporal_highpass[channel],
										  horizontal_width[channel],
										  (PIXEL *)line_buffer);

			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;
		}

		// The output width is twice the width of the wavelet bands
		output_width = 2 * horizontal_width[0];

		if (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64)
		{
			// Invert the temporal bands from all channels and pack as V210 output
			InvertInterlacedRow16sToV210(temporal_lowpass, temporal_highpass, num_channels,
										 output, output_pitch, output_width, frame_width,
										 yuv_buffer, yuv_buffer_size, format, chroma_offset, precision);
		}
		else
		{
			// Invert the temporal bands from all channels and pack as 8-bit output
			InvertInterlacedRow16s(temporal_lowpass, temporal_highpass, num_channels,
								   output, output_pitch, output_width, frame_width,
								   yuv_buffer, yuv_buffer_size, format, frame->colorspace,
								   chroma_offset, precision, row);
		}

		// Advance to the next row in the packed output image
		output += field_pitch;
	}
}
// Convert a decoded image into the packed output buffer in the requested
// DECODED_FORMAT_*.  Unsupported formats produce a blank (chroma-zero) frame.
void CopyImageToBuffer(IMAGE *image, uint8_t *output_buffer, int32_t output_pitch, int format)
{
	bool flip = false;

	START(tk_convert);

	switch (format)
	{
	case DECODED_FORMAT_RGB24:
		flip = true;	// RGB24 is stored with the first image row at the bottom
		/* fall through */
	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24, flip);
		break;

	case DECODED_FORMAT_RGB32:
		flip = true;	// RGB32 is stored with the first image row at the bottom
		/* fall through */
	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32, flip);
		break;

	case DECODED_FORMAT_YUYV:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_YUYV, flip);
		break;

	case DECODED_FORMAT_UYVY:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_UYVY, flip);
		break;

	default:
	{
		// Unsupported format: return a blank frame
		size_t blank_size = image->height * output_pitch;
		assert(0);
		memset(output_buffer, COLOR_CHROMA_ZERO, blank_size);
		break;
	}
	}

	STOP(tk_convert);
}
// Pack 10-bit lowpass luma and chroma into 8-bit packed 4:2:2 output for the
// side-by-side stereo modes.  Each output luma byte is the average of two
// adjacent luma samples and each chroma byte is the average of two adjacent
// chroma samples, shifted down to 8 bits and saturated.
//
// NOTE(review): bytes are emitted in the order Y, V, Y, U even though the
// function name says YUYV; confirm this matches the channel assignment used
// by the callers (images[1] = u_image, images[2] = v_image).
void SideLowpass16s10bitToYUYV(IMAGE *images[], uint8_t *output_buffer, int output_width, int output_height,
							   int output_pitch, bool inverted)
{
	PIXEL *luma_ptr = images[0]->band[0];
	PIXEL *u_ptr = images[1]->band[0];
	PIXEL *v_ptr = images[2]->band[0];

	const int luma_pitch = images[0]->pitch / sizeof(PIXEL);
	const int u_pitch = images[1]->pitch / sizeof(PIXEL);
	const int v_pitch = images[2]->pitch / sizeof(PIXEL);

	const int width = images[0]->width;
	const int height = output_height;

	uint8_t *row_base = output_buffer;
	int row;

	// The output pitch should be a positive number before inversion
	assert(output_pitch > 0);

	if (inverted) {
		// Start at the bottom row and negate the pitch to walk upward
		row_base += (height - 1) * output_pitch;
		output_pitch = NEG(output_pitch);
	}

	for (row = 0; row < height; row++)
	{
		uint8_t *out = row_base;
		int column;

		// Pack four bytes (two output pixels) per iteration
		for (column = 0; column < width; column += 4)
		{
			const int chroma = column >> 1;

			out[0] = SATURATE_8U((luma_ptr[column]     + luma_ptr[column + 1]) >> 5);
			out[1] = SATURATE_8U((v_ptr[chroma]        + v_ptr[chroma + 1])    >> 5);
			out[2] = SATURATE_8U((luma_ptr[column + 2] + luma_ptr[column + 3]) >> 5);
			out[3] = SATURATE_8U((u_ptr[chroma]        + u_ptr[chroma + 1])    >> 5);
			out += 4;
		}

		// Advance to the next rows in the input and output images
		luma_ptr += luma_pitch;
		u_ptr += u_pitch;
		v_ptr += v_pitch;
		row_base += output_pitch;
	}
}
// Convert 16-bit signed lowpass data into packed RGB/YUV and store it in the output buffer
// Convert 16-bit signed lowpass (thumbnail) data from each channel into the
// packed output format requested in info->format.
//
// decoder        - decoder state (consulted for 3D/stereo blend decisions)
// images         - per-channel lowpass images (num_channels entries)
// output_buffer  - destination frame buffer
// output_pitch   - destination row pitch in bytes
// info           - output frame dimensions, format and colorspace
// chroma_offset  - chroma offset from encoding (not used directly here)
// precision      - encoded precision; descale = precision - 8 shifts to 8 bits
// encode_format  - internal encoded format (YUV 4:2:2, RGB 4:4:4, Bayer, ...)
// whitebitdepth  - white point bit depth passed to the RGB48 converters
void CopyLowpass16sToBuffer(DECODER *decoder, IMAGE *images[], int num_channels, uint8_t *output_buffer, int32_t output_pitch,
FRAME_INFO *info, int chroma_offset, int precision, int encode_format, int whitebitdepth)
{
//IMAGE *image = frame->channel[0];
bool inverted = false;
int output_width = info->width;
int output_height = info->height;
// Shift needed to reduce the encoded precision down to 8 bits
int descale = precision - 8;
// Get the color format from the decoded format
int color_format = info->format & COLOR_FORMAT_MASK;
// Must compile this routine with switches set for decoding to 8-bit unsigned pixels
#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
assert(0);
return;
#endif
START(tk_convert);
#if 0
// Fill the output buffer with blank values
EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif
// Determine the type of conversion
switch (info->format)
{
case DECODED_FORMAT_RGB24:
inverted = true;
// Fall through and convert to RGB (first image row displayed at the bottom)
case DECODED_FORMAT_RGB24_INVERTED:
// RGB 4:4:4 sources use the RGB48 converter; YUV sources use the fast path
if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
COLOR_FORMAT_RGB24, info->colorspace, inverted, descale, num_channels);
}
else
{
ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
COLOR_FORMAT_RGB24, info->colorspace, inverted, descale);
}
break;
case DECODED_FORMAT_RGB32:
inverted = true;
// Fall through and convert to RGB (first image row displayed at the bottom)
case DECODED_FORMAT_RGB32_INVERTED:
if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
COLOR_FORMAT_RGB32, info->colorspace, inverted, descale, num_channels);
}
else
{
ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
COLOR_FORMAT_RGB32, info->colorspace, inverted, descale);
}
break;
case DECODED_FORMAT_RG48:
// 16-bit RGB output: pick the converter that matches the encoded format
if(encode_format == ENCODED_FORMAT_BAYER)
{
ConvertLowpass16sBayerToRGB48(images, output_buffer, output_width, output_height,
output_pitch, 2, num_channels);
}
else if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
// NOTE(review): the inner test repeats the outer condition, so scale is
// always 2 on this branch -- presumably left over from an earlier edit
int scale = 1;
if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
scale = 2;
ConvertLowpass16sRGB48ToRGB48(images, output_buffer, output_width, output_height,
output_pitch, scale, num_channels);
}
else
{
ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
output_height, output_pitch, info->colorspace, inverted, descale,
info->format, whitebitdepth);
}
break;
case DECODED_FORMAT_RG64:
// 16-bit RGBA output is only supported from RGB(A) 4:4:4 sources
if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
descale, num_channels, info->format & 0xffff);
}
else
{
assert(0);
}
break;
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
// Deep RGB formats: RGB(A) sources go through the RGBA64 converter,
// YUV sources through the RGB48 converter
if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
descale, num_channels, info->format & 0xffff);
}
else
{
ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
output_height, output_pitch, info->colorspace, inverted, descale,
info->format, whitebitdepth);
}
break;
#if 0
case DECODED_FORMAT_YUYV_INVERTED:
inverted = true;
// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
if (precision == CODEC_PRECISION_10BIT)
{
int lineskip = 1; // 3D Work
int pitch = output_pitch;
// For stacked/interleaved stereo pairs, write every other output line
if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
{
if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work
{
lineskip = 2;
// NOTE(review): 3 is presumably BLEND_LINE_INTERLEAVED -- confirm and
// replace the magic number with the enum constant
if(decoder->channel_blend_type == 3)
pitch *= 2;
}
}
if((decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || decoder->channel_blend_type == BLEND_FREEVIEW) && decoder->frame.format == DECODED_FORMAT_YUYV) //side by side
{
SideLowpass16s10bitToYUYV(images, output_buffer, output_width, output_height, pitch, inverted);
}
else
{
//ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, COLOR_FORMAT_YUYV, inverted, lineskip);
ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, color_format, inverted, lineskip);
}
}
else
{
// 8-bit precision path
//ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YUYV, inverted);
ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, color_format, inverted);
}
break;
#if 0
case DECODED_FORMAT_UYVY_INVERTED:
inverted = true;
// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
#if 0
case DECODED_FORMAT_UYVY:
ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_UYVY, inverted);
break;
#endif
//#if BUILD_PROSPECT
case DECODED_FORMAT_V210:
// V210 output is only supported from 10-bit encoded data
if (precision == CODEC_PRECISION_10BIT)
{
ConvertLowpass16s10bitToV210(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_V210, inverted);
}
else
{
//ConvertLowpass16sToV210(images, output_buffer, output_width, output_pitch, COLOR_FORMAT_V210, inverted);
assert(0);
}
break;
//#endif
case DECODED_FORMAT_YU64:
// DAN04262004
ConvertLowpass16sToYUV64(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YU64, inverted, precision);
break;
//#if BUILD_PROSPECT
case DECODED_FORMAT_YR16:
ConvertLowpass16sToYR16(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YR16, inverted, precision);
break;
//#endif
default: // Unsupported format (output a blank frame)
assert(0);
break;
}
STOP(tk_convert);
}
// Convert a strip of planar YUV rows into the packed output format requested
// by the caller.
void ConvertYUVStripPlanarToBuffer(uint8_t *planar_output[], int planar_pitch[], ROI roi,
								   uint8_t *output_buffer, int output_pitch, int frame_width,
								   int format, int colorspace)
{
#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
#error Must set compile-time switches to decode to 8-bit pixels
#endif
#if _ENCODE_CHROMA_OFFSET
#error Cannot handle images encoded with a non-zero chroma offset
#endif

	bool flip = false;
	int strip_width = roi.width;

	START(tk_convert);

	switch (format)
	{
	case DECODED_FORMAT_RGB24:
		flip = true;	// RGB24 is stored with the first image row at the bottom
		/* fall through */
	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
							  COLOR_FORMAT_RGB24, colorspace, flip);
		break;

	case DECODED_FORMAT_RGB32:
		flip = true;	// RGB32 is stored with the first image row at the bottom
		/* fall through */
	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
							  COLOR_FORMAT_RGB32, colorspace, flip);
		break;

	case DECODED_FORMAT_YUYV:
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
									  output_buffer, output_pitch, frame_width, format);
		break;

	case DECODED_FORMAT_UYVY:
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
							   COLOR_FORMAT_UYVY, colorspace, flip);
		break;

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}
// Convert rows of 16-bit unsigned planar data into the requested output
// format, dithering down for the 8-bit RGB targets.
void ConvertRow16uToDitheredBuffer(DECODER *decoder, uint8_t *planar_output[], int planar_pitch[], ROI roi,
								   uint8_t *output_buffer, int output_pitch, int frame_width,
								   int format, int colorspace)
{
	bool flip = false;
	int row_width = roi.width;

	START(tk_convert);

	switch (format)
	{
	case DECODED_FORMAT_RGB24:
		flip = true;	// RGB24 is stored with the first image row at the bottom
		/* fall through */
	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, row_width, output_pitch,
								   COLOR_FORMAT_RGB24, colorspace, flip);
		break;

	case DECODED_FORMAT_RGB32:
		flip = true;	// RGB32 is stored with the first image row at the bottom
		/* fall through */
	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, row_width, output_pitch,
								   COLOR_FORMAT_RGB32, colorspace, flip);
		break;

	// Deep output formats all route through the 64-bit BGRA converter
	case COLOR_FORMAT_WP13:
	case COLOR_FORMAT_B64A:
	case COLOR_FORMAT_RG48:
	case COLOR_FORMAT_R210:
	case COLOR_FORMAT_DPX0:
	case COLOR_FORMAT_RG30:
	case COLOR_FORMAT_AR10:
	case COLOR_FORMAT_AB10:
		ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, output_buffer, row_width, output_pitch, format, colorspace, NULL, NULL);
		break;

	case DECODED_FORMAT_YUYV:
		// This routine has not been updated for 16-bit row inputs
		assert(0);
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
									  output_buffer, output_pitch, frame_width, format);
		break;

	case DECODED_FORMAT_UYVY:
		// This routine has not been updated for 16-bit row inputs
		assert(0);
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, row_width, output_pitch,
							   COLOR_FORMAT_UYVY, colorspace, flip);
		break;

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}
// Convert one row of packed YUYV to the specified color
void ConvertRowYUYV(uint8_t *input, uint8_t *output, int length, int format, int colorspace, int precision)
{
size_t row_size = 2 * length;
bool inverted = false;
START(tk_convert);
// Determine the type of color conversion
switch (format)
{
case DECODED_FORMAT_RGB24:
inverted = true;
// Fall through and convert to RGB (first image row displayed at the bottom)
case DECODED_FORMAT_RGB24_INVERTED:
ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB24, colorspace, precision);
break;
case DECODED_FORMAT_RGB32:
inverted = true;
// Fall through and convert to RGB (first image row displayed at the bottom)
case DECODED_FORMAT_RGB32_INVERTED:
ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB32, colorspace, precision);
break;
case DECODED_FORMAT_YUYV:
if(precision == 8)
memcpy(output, input, row_size);
else
{
//need to dither to 8-bit
assert(0);
}
break;
case DECODED_FORMAT_UYVY:
if(precision == 8)
ConvertYUYVRowToUYVY(input, output, length, COLOR_FORMAT_UYVY);
else
{
//need to dither to 8-bit
assert(0);
}
break;
//#if BUILD_PROSPECT
case DECODED_FORMAT_V210:
assert(0); // should get here with 8bit data.
//ConvertYUYVRowToV210(input, output, length, COLOR_FORMAT_V210);
break;
case DECODED_FORMAT_YU64:
assert(0); // should get here with 8bit data.
//ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
break;
case DECODED_FORMAT_BYR3:
case DECODED_FORMAT_BYR4:
assert(0); // should get here with 8bit data.
//ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
break;
//#endif
default: // Unsupported format (output a blank frame)
assert(0);
memset(output, 0, row_size);
break;
}
STOP(tk_convert);
}
#if _THREADED_DECODER
// Thread-safe lookup and (re)allocation of a wavelet in the transform data
// structure.  Returns the (possibly reallocated) wavelet, or NULL when the
// decoder or transform pointer is NULL.
IMAGE *GetWaveletThreadSafe(DECODER *decoder, TRANSFORM *transform, int index,
							int width, int height, int level, int type)
{
	// Fix: do not dereference the transform before it has been checked for
	// NULL (the previous code read transform->wavelet[index] unconditionally,
	// which is undefined behavior when transform is NULL)
	IMAGE *wavelet = (transform != NULL) ? transform->wavelet[index] : NULL;

	assert(decoder != NULL && transform != NULL);
	if (decoder != NULL && transform != NULL)
	{
		// Lock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Lock(&decoder->entropy_worker_new.lock);
#endif
		// Get the wavelet from the transform data structure (thread safe)
		wavelet = transform->wavelet[index];

		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;

		// Unlock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Unlock(&decoder->entropy_worker_new.lock);
#endif
	}
	return wavelet;
}
// Update the codec state with the information in a tag value pair
CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *input, CODEC_STATE *codec, TAGWORD tag, TAGWORD value)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
bool optional = false;
int chunksize = 0;
bool result;
// Is this an optional tag?
if (tag < 0) {
tag = NEG(tag);
optional = true;
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "UpdateCodecState tag: %d, value: %d, optional: %d\n",
tag, value, optional);
}
#endif
switch (tag)
{
case CODEC_TAG_ZERO: // Used internally
assert(0); // Should not occur in the bitstream
error = CODEC_ERROR_INVALID_BITSTREAM;
break;
case CODEC_TAG_SAMPLE: // Type of sample
//assert(0);
if (value == SAMPLE_TYPE_CHANNEL)
{
result = DecodeSampleChannelHeader(decoder, input);
if (!result)
error = CODEC_ERROR_DECODE_SAMPLE_CHANNEL_HEADER;
else
error = CODEC_ERROR_OKAY;
}
break;
case CODEC_TAG_INDEX: // Sample index table
//assert(0); // Need to figure out how to return the group index
{
int count = value;
uint32_t *index = (uint32_t *)(&codec->channel_size[0]);
DecodeGroupIndex( input, index, min( count, TRANSFORM_MAX_CHANNELS ) );
codec->num_channels = count;
}
break;
case CODEC_TAG_SUBBAND: // Has the decoder encountered a subband?
{ // This tag is obsolete and not used in modern streams
int subband = value;
// Check that the subband number makes sense
assert(0 <= subband && subband <= codec->max_subband);
if (! (0 <= subband && subband <= codec->max_subband))
{
error = CODEC_ERROR_DECODING_SUBBAND;
break;
}
// Decompress the subband
result = DecodeSampleSubband(decoder, input, subband);
if (!result)
error = CODEC_ERROR_DECODING_SUBBAND;
else
error = CODEC_ERROR_OKAY;
}
break;
case CODEC_TAG_BAND_HEADER: //CODEC_TAG_BAND_DIVISOR: // Band divisor. this is last TAG before subband data so act.
codec->band.divisor = value; // This tag value pair encodes the band divisor which is obsolete
{
// This tag value pair marks the beginning of the encoded coefficients
// The subband number has already been decoded
int subband = codec->band.subband;
result = DecodeSampleSubband(decoder, input, subband);
if (!result)
error = CODEC_ERROR_DECODING_SUBBAND;
else
error = CODEC_ERROR_OKAY;
}
break;
case CODEC_TAG_ENTRY: // Entry in sample index
assert(0); // Need to figure out how to return the group index
break;
case CODEC_TAG_MARKER: // Bitstream marker
{
int marker = value;
uint8_t *current_position;
// Save the current bitstream position
current_position = GetBitstreamPosition(input);
current_position -= 4; // Step back to before the GetSegment i.e. the TAG
if (IsLowPassHeaderMarker(marker))
{
// Save the bitstream position for the start of the channel
codec->channel_position = current_position;
}
else if (IsLowPassBandMarker(marker))
{
int subband = 0;
result = DecodeSampleSubband(decoder, input, subband);
if (!result)
error = CODEC_ERROR_DECODING_SUBBAND;
else
error = CODEC_ERROR_OKAY;
}
}
break;
case CODEC_TAG_VERSION_MAJOR: // Version
assert(0);
break;
case CODEC_TAG_VERSION_MINOR: // Minor version number
assert(0);
break;
case CODEC_TAG_VERSION_REVISION: // Revision number
assert(0);
break;
case CODEC_TAG_VERSION_EDIT: // Edit number
assert(0);
break;
case CODEC_TAG_SEQUENCE_FLAGS: // Video sequence flags
assert(0);
break;
case CODEC_TAG_TRANSFORM_TYPE: // Type of transform
assert(TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST);
if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST)
{
int i;
codec->transform_type = value;
for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
{
TRANSFORM *transform = decoder->transform[i];
if(transform)
{
GetTransformPrescale(transform, codec->transform_type, codec->precision);
}
}
}
else
error = CODEC_ERROR_TRANSFORM_TYPE;
break;
case CODEC_TAG_NUM_FRAMES: // Number of frames in the group
assert(0 <= value && value <= TRANSFORM_NUM_FRAMES);
if (0 <= value && value <= TRANSFORM_NUM_FRAMES)
codec->num_frames = value;
else
error = CODEC_ERROR_NUM_FRAMES;
break;
case CODEC_TAG_NUM_CHANNELS: // Number of channels in the transform
assert(value <= CODEC_MAX_CHANNELS);
if (value <= CODEC_MAX_CHANNELS)
codec->num_channels = value;
else
error = CODEC_ERROR_NUM_CHANNELS;
break;
case CODEC_TAG_NUM_WAVELETS: // Number of wavelets in the transform
assert(0 < value && value <= TRANSFORM_NUM_WAVELETS);
if (0 < value && value <= TRANSFORM_NUM_WAVELETS)
codec->num_wavelets = value;
else
error = CODEC_ERROR_NUM_WAVELETS;
break;
case CODEC_TAG_NUM_SUBBANDS: // Number of encoded subbands
assert(0 < value && value <= TRANSFORM_NUM_SUBBANDS);
if (0 < value && value <= TRANSFORM_NUM_SUBBANDS)
codec->num_subbands = value;
else
error = CODEC_ERROR_NUM_SUBBANDS;
break;
case CODEC_TAG_NUM_SPATIAL: // Number of spatial levels
assert(0 < value && value <= TRANSFORM_NUM_SPATIAL);
if (0 < value && value <= TRANSFORM_NUM_SPATIAL)
codec->num_spatial = value;
else
error = CODEC_ERROR_NUM_SPATIAL;
break;
case CODEC_TAG_FIRST_WAVELET: // Type of the first wavelet
assert(value == TRANSFORM_FIRST_WAVELET);
if (value == TRANSFORM_FIRST_WAVELET)
codec->first_wavelet = value;
else
error = CODEC_ERROR_FIRST_WAVELET;
break;
case CODEC_TAG_CHANNEL_SIZE: // Number of bytes in each channel
assert(0);
break;
case CODEC_TAG_GROUP_TRAILER: // Group trailer and checksum
codec->sample_done = true;
break;
case CODEC_TAG_FRAME_TYPE: // Type of frame marks the frame start
codec->frame.type = value;
break;
case CODEC_TAG_FRAME_WIDTH: // Width of the frame
codec->frame.width = value;
break;
case CODEC_TAG_FRAME_HEIGHT: // Height of the frame
codec->frame.height = value;
//DAN20080729 -- Initialize the default colorspace based on clip resolution
if ((decoder->frame.colorspace & COLORSPACE_MASK) == COLOR_SPACE_UNDEFINED)
{
int internalheight = value;
int internalwidth = codec->frame.width;
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
internalwidth *= 2;
internalheight *= 2;
}
if(internalheight > 576 || internalwidth > 720)
decoder->frame.colorspace |= COLOR_SPACE_CG_709;
else
decoder->frame.colorspace |= COLOR_SPACE_CG_601;
}
//if(decoder->frame.colorspace_filedefault)
// decoder->frame.colorspace = decoder->frame.colorspace_filedefault;
if(decoder->frame.colorspace_override)
decoder->frame.colorspace = decoder->frame.colorspace_override;
break;
case CODEC_TAG_ENCODED_COLORSPACE: //DAN20080729
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
value &= ~(COLOR_SPACE_BT_601|COLOR_SPACE_BT_709); // Bayer has no 601 vs 709,
//there was a bug in 3.9.4 that had bayer flagged as 601.
if(decoder->frame.colorspace_override)
decoder->frame.colorspace = decoder->frame.colorspace_override;
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422)
{
decoder->frame.colorspace &= ~(COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709);
decoder->frame.colorspace |= (value & (COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709));
//Let the VSRGB status be controllable by the calling application (e.g. Vegas)
}
else
{
decoder->frame.colorspace &= ~(COLOR_SPACE_VS_RGB);
decoder->frame.colorspace |= (value & (COLOR_SPACE_VS_RGB));
}
}
decoder->frame.colorspace_filedefault = value;
break;
case CODEC_TAG_FRAME_FORMAT: // Format of the encoded pixels (GRAY, YUV, RGB, RGBA)
assert(0);
break;
case CODEC_TAG_INPUT_FORMAT: // Format of the original pixels
codec->input_format = value;
// Set the encoded format if it has not already been set
// error = UpdateEncodedFormat(codec, (COLOR_FORMAT)value);
break;
case CODEC_TAG_ENCODED_FORMAT: // Internal format of the encoded data
case CODEC_TAG_OLD_ENCODED_FORMAT:
codec->encoded_format = value;
if(codec->encoded_format == ENCODED_FORMAT_RGBA_4444 && codec->num_channels == 3)
codec->encoded_format = ENCODED_FORMAT_RGB_444;
break;
case CODEC_TAG_FRAME_INDEX: // Position of frame within the group
codec->frame.group_index = value;
break;
case CODEC_TAG_FRAME_TRAILER: // Frame trailer and checksum
codec->sample_done = true;
break;
case CODEC_TAG_LOWPASS_SUBBAND: // Subband number of the lowpass band
codec->lowpass.subband = value;
error = SetDefaultEncodedFormat(codec);
break;
case CODEC_TAG_NUM_LEVELS: // Number of wavelet levels
codec->lowpass.level = value;
break;
case CODEC_TAG_LOWPASS_WIDTH: // Width of the lowpass band
codec->lowpass.width = value;
break;
case CODEC_TAG_LOWPASS_HEIGHT: // Height of the lowpass band
codec->lowpass.height = value;
break;
case CODEC_TAG_MARGIN_TOP: // Margins that define the encoded subset
codec->lowpass.margin.top = value;
break;
case CODEC_TAG_MARGIN_BOTTOM:
codec->lowpass.margin.bottom = value;
break;
case CODEC_TAG_MARGIN_LEFT:
codec->lowpass.margin.left = value;
break;
case CODEC_TAG_MARGIN_RIGHT:
codec->lowpass.margin.right = value;
break;
case CODEC_TAG_PIXEL_OFFSET: // Quantization parameters
codec->lowpass.pixel_offset = value;
break;
case CODEC_TAG_QUANTIZATION: // Quantization divisor used during encoding
codec->lowpass.quantization = value;
break;
case CODEC_TAG_PIXEL_DEPTH: // Number of bits per pixel
codec->lowpass.bits_per_pixel = value;
break;
case CODEC_TAG_LOWPASS_TRAILER: // Lowpass trailer
assert(0);
break;
case CODEC_TAG_WAVELET_TYPE: // Type of wavelet
codec->highpass.wavelet_type = value;
break;
case CODEC_TAG_WAVELET_NUMBER: // Number of the wavelet in the transform
codec->highpass.wavelet_number = value;
break;
case CODEC_TAG_WAVELET_LEVEL: // Level of the wavelet in the transform
codec->highpass.wavelet_level = value;
break;
case CODEC_TAG_NUM_BANDS: // Number of wavelet bands
codec->highpass.num_bands = value;
break;
case CODEC_TAG_HIGHPASS_WIDTH: // Width of each highpass band
codec->highpass.width = value;
break;
case CODEC_TAG_HIGHPASS_HEIGHT: // Height of each highpass band
codec->highpass.height = value;
break;
case CODEC_TAG_LOWPASS_BORDER: // Dimensions of lowpass border (obsolete)
codec->highpass.lowpass_border = value;
break;
case CODEC_TAG_HIGHPASS_BORDER: // Dimensions of highpass border (obsolete)
codec->highpass.highpass_border = value;
break;
case CODEC_TAG_LOWPASS_SCALE: // Scale factor for lowpass band
codec->highpass.lowpass_scale = value;
break;
case CODEC_TAG_LOWPASS_DIVISOR: // Divisor for the lowpass band
codec->highpass.lowpass_divisor = value;
break;
case CODEC_TAG_HIGHPASS_TRAILER: // Highpass trailer
assert(0);
break;
case CODEC_TAG_BAND_NUMBER: // Identifying number of a wavelet band
codec->band.number = value;
break;
case CODEC_TAG_BAND_WIDTH: // Band data width
codec->band.width = value;
break;
case CODEC_TAG_BAND_HEIGHT: // Band data height
codec->band.height = value;
break;
case CODEC_TAG_BAND_SUBBAND: // Subband number of this wavelet band
codec->band.subband = value;
//assert(value != 255);
break;
case CODEC_TAG_BAND_ENCODING: // Encoding method for this band
codec->band.encoding = value;
break;
case CODEC_TAG_BAND_QUANTIZATION: // Quantization applied to band
codec->band.quantization = value;
break;
case CODEC_TAG_BAND_SCALE: // Band scale factor
codec->band.scale = value;
break;
case CODEC_TAG_BAND_TRAILER: // Band trailer
assert(0);
break;
case CODEC_TAG_NUM_ZEROVALUES: // Number of zero values
assert(0);
break;
case CODEC_TAG_NUM_ZEROTREES: // Number of zerotrees
assert(0);
break;
case CODEC_TAG_NUM_POSITIVES: // Number of positive values
assert(0);
break;
case CODEC_TAG_NUM_NEGATIVES: // Number of negative values
assert(0);
break;
case CODEC_TAG_NUM_ZERONODES: // Number of zerotree nodes
assert(0);
break;
case CODEC_TAG_CHANNEL: // Channel number
assert(0);
break;
case CODEC_TAG_INTERLACED_FLAGS: // Interlaced structure of the video stream
//assert(0);
break;
//assert(0);
case CODEC_TAG_PROTECTION_FLAGS: // Copy protection bits
//assert(0);
break;
case CODEC_TAG_PICTURE_ASPECT_X: // Numerator of the picture aspect ratio
codec->picture_aspect_x = value;
//assert(0);
break;
case CODEC_TAG_PICTURE_ASPECT_Y: // Denominator of the picture aspect ratio
codec->picture_aspect_y = value;
//assert(0);
break;
case CODEC_TAG_SAMPLE_FLAGS: // Flag bits that control sample decoding
// Progressive versus interlaced decoding is specified by the sample flags
error = UpdateCodecFlags(codec, value);
break;
case CODEC_TAG_FRAME_NUMBER: // Sequence number of the frame in the bitstream
codec->frame_number = value;
break;
// This TAG is now support as part of the universal decoder.
// Only Prospect HD builds can decode 10bit.
case CODEC_TAG_PRECISION: // Number of bits in the video source
codec->precision = value;
{
int i;
for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
{
TRANSFORM *transform = decoder->transform[i];
if(transform)
{
GetTransformPrescale(transform, codec->transform_type, codec->precision);
}
}
}
break;
case CODEC_TAG_PRESCALE_TABLE:
{
int i;
int prescale[TRANSFORM_MAX_WAVELETS] = {0};
for(i=0;i<TRANSFORM_MAX_WAVELETS;i++)
prescale[i] = value >> (14-i*2) & 0x3;
for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
{
TRANSFORM *transform = decoder->transform[i];
if(transform)
{
memcpy(transform->prescale, prescale, sizeof(prescale));
}
}
}
break;
case CODEC_TAG_VERSION: // Version number of the encoder used in each GOP.
codec->version[0] = (value>>12) & 0xf;
codec->version[1] = (value>>8) & 0xf;
codec->version[2] = value & 0xff;
break;
case CODEC_TAG_QUALITY_L: //
codec->encode_quality &= 0xffff0000;
codec->encode_quality |= value;
break;
case CODEC_TAG_QUALITY_H: //
codec->encode_quality &= 0xffff;
codec->encode_quality |= value<<16;
break;
case CODEC_TAG_BAND_CODING_FLAGS:
codec->active_codebook = value & 0xf; // 0-15 valid code books
codec->difference_coding = (value>>4) & 1;
break;
// Peak table processing
case CODEC_TAG_PEAK_TABLE_OFFSET_L:
codec->peak_table.offset &= ~0xffff;
codec->peak_table.offset |= (value & 0xffff);
codec->peak_table.base = (PIXEL *)(input->lpCurrentWord);
codec->peak_table.level = 0; // reset for the next subband
break;
case CODEC_TAG_PEAK_TABLE_OFFSET_H:
codec->peak_table.offset &= 0xffff;
codec->peak_table.offset |= (value & 0xffff)<<16;
codec->peak_table.level = 0; // reset for the next subband
break;
case CODEC_TAG_PEAK_LEVEL:
codec->peak_table.level = value;
codec->peak_table.base += codec->peak_table.offset / sizeof(PIXEL);
break;
case CODEC_TAG_PEAK_TABLE:
//this is the chunk header, so we have peak data
codec->peak_table.level = 0; // reset for the next subband
//Just skip as the data was read ahead
chunksize = value;
chunksize &= 0xffff;
input->lpCurrentWord += chunksize*4;
input->nWordsUsed -= chunksize*4;
break;
#if (1 && DEBUG)
case CODEC_TAG_SAMPLE_END: // Marks the end of the sample (for debugging only)
assert(0);
break;
#endif
default: // Unknown tag
if(tag & 0x4000)
{
if(tag & 0x2000) // i.e. 0x6xxx = 24bit size.
{
chunksize = value;
chunksize &= 0xffff;
chunksize += ((tag&0xff)<<16);
}
else // 16bit size
{
chunksize = value;
chunksize &= 0xffff;
}
}
else if(tag & 0x2000) //24bit LONGs chunk size
{
optional = true; // Fixes a weird seneraio where the size fields in SizeTagPop() has not
// updated the size and turned the tag to optional. TODO : WHY
chunksize = 0; // not not skip
// chunksize = value + ((tag & 0xff)<<16);
// do not skip an unknown but optional chunk
// These are only use to size subbands, but the data within should not be skipped
// unless
if((tag & 0xff00) == CODEC_TAG_UNCOMPRESS)
{
optional = true;
chunksize = value;
chunksize &= 0xffff;
chunksize += ((tag&0xff)<<16);
decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentWord;
decoder->uncompressed_size = chunksize*4;
decoder->sample_uncompressed = 1;
}
}
assert(optional);
if(!optional)
{
error = CODEC_ERROR_UNKNOWN_REQUIRED_TAG;
}
else if(chunksize > 0) // skip this option chunk
{
input->lpCurrentWord += chunksize*4;
input->nWordsUsed -= chunksize*4;
}
break;
}
return error;
}
// Mark one band of a wavelet as fully decoded (valid) and also as started.
// When the threaded decoder is compiled in and worker threads exist, the flag
// update is serialized with the entropy worker lock so the transform threads
// observe a consistent view of the band flags.
// decoder: owning decoder (supplies the entropy worker lock and log file)
// wavelet: wavelet whose band flags are updated
// band:    band index, converted to a bit mask via BAND_VALID_MASK
void UpdateWaveletBandValidFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
assert(decoder != NULL);
assert(wavelet != NULL);
if (decoder != NULL && wavelet != NULL)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
#if _THREADED_DECODER
// Lock access to the wavelet data (only when worker threads were created)
if(decoder->entropy_worker_new.pool.thread_count)
Lock(&decoder->entropy_worker_new.lock);
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Changing band valid flags: 0x%04X, mask: 0x%04X\n",
wavelet->band_valid_flags, BAND_VALID_MASK(band));
}
#endif
// Update the wavelet band flags: a valid band is also a started band
wavelet->band_valid_flags |= BAND_VALID_MASK(band);
wavelet->band_started_flags |= BAND_VALID_MASK(band);
#if _THREADED_DECODER
// Unlock access to the wavelet data
if(decoder->entropy_worker_new.pool.thread_count)
Unlock(&decoder->entropy_worker_new.lock);
#endif
}
}
// Mark one band of a wavelet as started (decoding in progress) without
// marking it valid.  Companion to UpdateWaveletBandValidFlags; the update is
// locked only when delayed thread start is disabled, since otherwise the
// worker threads are not yet running and cannot race on the flags.
// decoder: owning decoder (supplies the entropy worker lock)
// wavelet: wavelet whose started flags are updated
// band:    band index, converted to a bit mask via BAND_VALID_MASK
void UpdateWaveletBandStartedFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
assert(decoder != NULL);
assert(wavelet != NULL);
if (decoder != NULL && wavelet != NULL)
{
// Update the wavelet band flags
#if _DELAYED_THREAD_START==0
// Serialize with the entropy worker threads (they may already be running)
if(decoder->entropy_worker_new.pool.thread_count)
Lock(&decoder->entropy_worker_new.lock);
#endif
wavelet->band_started_flags |= BAND_VALID_MASK(band);
#if _DELAYED_THREAD_START==0
if(decoder->entropy_worker_new.pool.thread_count)
Unlock(&decoder->entropy_worker_new.lock);
#endif
}
}
// Return true when every wavelet band that the entropy decoder itself must
// produce (as opposed to bands computed by transforms queued earlier) has
// been marked valid.  Returns false when the wavelet has not been allocated
// yet or the transform type is not recognized.
// wavelet:        the wavelet to examine (may be NULL if not created yet)
// index:          position of the wavelet within the transform
// transform_type: TRANSFORM_TYPE_FIELDPLUS or TRANSFORM_TYPE_SPATIAL
bool DecodedBandsValid(IMAGE *wavelet, int index, int transform_type)
{
    uint32_t skip_mask;        // Band(s) supplied by earlier queued transforms
    uint32_t required_mask;    // Bands the entropy decoder must supply
    uint32_t decoded_mask;     // Required bands that are already valid

    // Too soon to wait on band flags if the wavelet does not exist yet
    if (wavelet == NULL) {
        return false;
    }

    if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
    {
        if (index == 2)
        {
            // The temporal wavelet: both bands are computed entirely by
            // transforms earlier in the queue, so nothing to wait for here
            assert(wavelet->wavelet_type == WAVELET_TYPE_TEMPORAL);
            assert(wavelet->num_bands == 2);
            return true;
        }
        // Wavelets that terminate a transform chain (index 3 or 5) need all
        // bands decoded; otherwise the lowpass band is produced by an
        // earlier transform in the queue and can be skipped
        skip_mask = (index == 3 || index == 5) ? 0 : BAND_VALID_MASK(0);
    }
    else if (transform_type == TRANSFORM_TYPE_SPATIAL)
    {
        // The wavelet at the top of the pyramid (index 2) needs all bands;
        // lower levels receive their lowpass band from earlier transforms
        skip_mask = (index == 2) ? 0 : BAND_VALID_MASK(0);
    }
    else
    {
        // Unknown transform type: assume the bands are not valid
        assert(0);
        return false;
    }

    // All bands present in this wavelet, minus any band produced by the
    // threaded transform
    required_mask = ((1 << wavelet->num_bands) - 1) & ~skip_mask;

    // Which of the required bands have actually been decoded so far?
    decoded_mask = wavelet->band_valid_flags & required_mask;

    return (decoded_mask == required_mask);
}
// Append an inverse-transform request for the specified channel and wavelet
// index to the decoder's transform queue, to be processed later by the
// entropy worker threads.  The queue entry captures the transform pointer,
// channel, wavelet index, and current precision; the wavelet itself may not
// exist yet at queue time (it is resolved when the entry is processed).
// decoder: owning decoder (supplies the queue and the worker lock)
// channel: channel number, expected in [0, TRANSFORM_MAX_CHANNELS)
// index:   wavelet index, expected in [0, TRANSFORM_MAX_WAVELETS)
void QueueThreadedTransform(DECODER *decoder, int channel, int index)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
TRANSFORM *transform = decoder->transform[channel];
//IMAGE *wavelet = transform->wavelet[index];
int precision = codec->precision;
// The transform data structure must exist
assert(transform != NULL);
// The transform thread variables should have been created
{
int free_entry;
#if _DELAYED_THREAD_START==0
// Lock access to the transform queue (workers may already be running)
Lock(&decoder->entropy_worker_new.lock);
#endif
// Copy the transform parameters into the next queue entry
free_entry = decoder->transform_queue.free_entry;
assert(0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH);
if (0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH)
{
assert(transform != NULL);
assert(0 <= channel && channel < TRANSFORM_MAX_CHANNELS);
assert(0 <= index && index < TRANSFORM_MAX_WAVELETS);
// Note: The wavelet may not exist when the transform is queued
decoder->transform_queue.queue[free_entry].transform = transform;
decoder->transform_queue.queue[free_entry].channel = channel;
decoder->transform_queue.queue[free_entry].index = index;
decoder->transform_queue.queue[free_entry].precision = precision;
decoder->transform_queue.queue[free_entry].done = 0;
// Update the transform request queue
decoder->transform_queue.free_entry++;
decoder->transform_queue.num_entries++;
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile, "Queued transform, channel: %d, index: %d\n", channel, index);
}
#endif
}
// NOTE(review): when the queue is full the request is silently dropped
// after the assertion — confirm callers never overrun DECODING_QUEUE_LENGTH
#if _DELAYED_THREAD_START==0
Unlock(&decoder->entropy_worker_new.lock);
#endif
}
}
#if _THREADED_DECODER
// Wait for the entropy worker thread pool to finish all queued transform
// work, then reset the transform queue to the empty state.  When delayed
// thread start is enabled, the workers have not begun yet, so a start
// message is sent first.  No-op if the pool has no threads.
void WaitForTransformThread(DECODER *decoder)
{
if(decoder->entropy_worker_new.pool.thread_count)
{
#if _DELAYED_THREAD_START
// Workers were created idle; start them now so the queue drains
ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
ThreadPoolWaitAllDone(&decoder->entropy_worker_new.pool);
// All entries processed: reset the queue indices for the next sample
decoder->transform_queue.started = 0;
decoder->transform_queue.num_entries = 0;
decoder->transform_queue.next_entry = 0;
decoder->transform_queue.free_entry = 0;
}
}
#endif
#endif
#if _INTERLACED_WORKER_THREADS
// Run the inverse frame transform with YUV output using the interlaced
// worker threads.  Posts the parameters to the shared mailbox, releases the
// row semaphore once per transform row, wakes all workers, and blocks until
// every worker signals its done event.
// decoder:       owning decoder (supplies the interlaced worker state)
// frame_index:   index of the output frame within the group
// num_channels:  number of channels in the transform array
// output/pitch:  destination frame buffer and row pitch
// info:          output frame format description (copied into the mailbox)
// chroma_offset: offset applied to the output chroma
// precision:     source pixel bit depth
void TransformInverseFrameThreadedToYUV(DECODER *decoder, int frame_index, int num_channels,
uint8_t *output, int pitch, FRAME_INFO *info,
int chroma_offset, int precision)
{
int32_t lPreviousCount,i;
// There are half as many input rows as output rows
// (height rounded up to a multiple of 8, then halved)
int transform_height = (((info->height+7)/8)*8) / 2;
int middle_row_count = transform_height;
// Post a message to the mailbox shared by all interlaced workers
struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
mailbox->type = THREAD_TRANSFORM_FRAME_YUV;
mailbox->frame = frame_index;
mailbox->num_channels = num_channels;
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->chroma_offset = chroma_offset;
mailbox->precision = precision;
// Set the semaphore to the number of rows so workers can claim rows
decoder->interlaced_worker.current_row = 0;
ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, middle_row_count, &lPreviousCount);
assert(lPreviousCount == 0);
// Wake up both worker threads
for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
{
SetEvent(decoder->interlaced_worker.start_event[i]);
}
// Wait for both worker threads to finish
WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}
// Run the inverse frame transform with 16-bit row output using the
// interlaced worker threads.  Identical control flow to
// TransformInverseFrameThreadedToYUV except for the mailbox message type
// and the output pixel type.
// decoder:       owning decoder (supplies the interlaced worker state)
// frame_index:   index of the output frame within the group
// num_channels:  number of channels in the transform array
// output/pitch:  destination buffer of 16-bit pixels and row pitch
// info:          output frame format description (copied into the mailbox)
// chroma_offset: offset applied to the output chroma
// precision:     source pixel bit depth
void TransformInverseFrameThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
PIXEL16U *output, int pitch, FRAME_INFO *info,
int chroma_offset, int precision)
{
int32_t lPreviousCount,i;
// There are half as many input rows as output rows
// (height rounded up to a multiple of 8, then halved)
int transform_height = (((info->height+7)/8)*8) / 2;
int middle_row_count = transform_height;
// Post a message to the mailbox shared by all interlaced workers
struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
mailbox->type = THREAD_TRANSFORM_FRAME_ROW16U;
mailbox->frame = frame_index;
mailbox->num_channels = num_channels;
mailbox->output = (uint8_t *)output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->chroma_offset = chroma_offset;
mailbox->precision = precision;
// Set the semaphore to the number of rows so workers can claim rows
decoder->interlaced_worker.current_row = 0;
ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, middle_row_count, &lPreviousCount);
assert(lPreviousCount == 0);
// Wake up both worker threads
for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
{
SetEvent(decoder->interlaced_worker.start_event[i]);
}
// Wait for both worker threads to finish
WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}
// Main loop of an interlaced worker thread.  Each worker claims a unique
// thread index, then repeatedly waits on its start event; when signaled it
// copies the processing parameters from the shared mailbox (under the lock)
// and performs one section of the inverse frame transform, signaling its
// done event afterwards.  The loop exits when the stop event is signaled
// or the wait is abandoned.
// lpParam: the DECODER that owns this worker
// Returns 0 on normal exit, 1 if the worker synchronization objects were
// not created.
DWORD WINAPI InterlacedWorkerThreadProc(LPVOID lpParam)
{
DECODER *decoder = (DECODER *)lpParam;
FILE *logfile = decoder->logfile;
struct interlace_data *data = &decoder->interlaced_worker.interlace_data;
int thread_index;
HANDLE hObjects[2];
DWORD dwReturnValue;
// Pin the thread to the configured processor set, if any
if(decoder->thread_cntrl.affinity)
{
HANDLE hCurrentThread = GetCurrentThread();
SetThreadAffinityMask(hCurrentThread,decoder->thread_cntrl.affinity);
}
// Set the handler for system exceptions
#ifdef _WIN32
SetDefaultExceptionHandler();
#endif
// Determine the index of this worker thread
// (the increment is guarded by the critical section when it exists)
if(decoder->interlaced_worker.lock_init)
{
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
thread_index = decoder->interlaced_worker.thread_count++;
if(decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
// The transform worker variables should have been created
assert(decoder->interlaced_worker.start_event[thread_index] != NULL);
assert(decoder->interlaced_worker.row_semaphore != NULL);
assert(decoder->interlaced_worker.done_event[thread_index] != NULL);
assert(decoder->interlaced_worker.stop_event != NULL);
if (!(decoder->interlaced_worker.start_event[thread_index] != NULL &&
decoder->interlaced_worker.row_semaphore != NULL &&
decoder->interlaced_worker.done_event[thread_index] != NULL &&
decoder->interlaced_worker.stop_event != NULL)) {
return 1;
}
// Wait on this worker's start event and the shared stop event together
hObjects[0] = decoder->interlaced_worker.start_event[thread_index];
hObjects[1] = decoder->interlaced_worker.stop_event;
for (;;)
{
// Wait for the signal to begin processing a transform
dwReturnValue = WaitForMultipleObjects(2, hObjects, false, INFINITE);
// Received a signal to begin inverse transform processing?
if (dwReturnValue == WAIT_OBJECT_0)
{
int type; // Type of inverse transform to perform
int frame_index; // Index of output frame to produce
int num_channels; // Number of channels in the transform array
uint8_t *output; // Output frame buffer
int pitch; // Output frame pitch
FRAME_INFO info; // Format of the output frame
int chroma_offset; // Offset for the output chroma
int precision; // Source pixel bit depth
// Lock access to the transform data while copying the mailbox
if(decoder->interlaced_worker.lock_init) {
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
// Get the processing parameters
type = data->type;
frame_index = data->frame;
num_channels = data->num_channels;
output = data->output;
pitch = data->pitch;
memcpy(&info, &data->info, sizeof(FRAME_INFO));
chroma_offset = data->chroma_offset;
precision = data->precision;
// Unlock access to the transform data
if(decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
// Select the type of inverse transform to perform
switch (type)
{
case THREAD_TRANSFORM_FRAME_YUV:
//TODO: more to new _THREADED model
TransformInverseFrameSectionToYUV(decoder, thread_index, frame_index, num_channels,
output, pitch, &info, chroma_offset, precision);
break;
case THREAD_TRANSFORM_FRAME_ROW16U:
//TODO: more to new _THREADED model
TransformInverseFrameSectionToRow16u(decoder, thread_index, frame_index, num_channels,
(PIXEL16U *)output, pitch, &info, chroma_offset, precision);
break;
default:
// Unknown message type in the mailbox
assert(0);
break;
}
// Signal that this thread is done
SetEvent(decoder->interlaced_worker.done_event[thread_index]);
}
else
{
// Should have a condition that causes the thread to terminate
assert(dwReturnValue == WAIT_OBJECT_0+1 || dwReturnValue == WAIT_ABANDONED);
break;
}
}
return 0;
}
#endif
// Compute the dimensions of the decoded frame for the requested output
// resolution.  The dimensions are derived from the wavelet that will be used
// for reconstruction at that resolution, multiplied by the decoding scale.
// transform_array:    per-channel transforms (channel 0 supplies the wavelet)
// num_channels:       number of channels (used only by the debug assertions)
// frame_index:        frame index within the group (debug assertions only)
// resolution:         one of the DECODED_RESOLUTION_* codes
// decoded_width_out:  receives the decoded width (may be NULL)
// decoded_height_out: receives the decoded height (may be NULL)
// If the required wavelet has not been allocated, or the resolution code is
// unknown, zero dimensions are reported instead of dereferencing NULL
// (previously a crash in release builds where the assertion is compiled out).
void GetDecodedFrameDimensions(TRANSFORM **transform_array,
                               int num_channels,
                               int frame_index,
                               int resolution,
                               int *decoded_width_out,
                               int *decoded_height_out)
{
    IMAGE *wavelet = NULL;
    int decoded_scale = 0;

    // Select the wavelet and scale factor that correspond to the resolution
    switch(resolution)
    {
    case DECODED_RESOLUTION_FULL_DEBAYER:
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
        assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 2;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_FULL:
#if DEBUG
        assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 2;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_HALF_NODEBAYER:
    case DECODED_RESOLUTION_HALF:
#if DEBUG
        assert(AllLowpassBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_QUARTER:
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[3];
        break;

    case DECODED_RESOLUTION_LOWPASS_ONLY:
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[5];
        // Is this an intra frame?  Intra samples store the lowpass wavelet
        // at index 2 instead of index 5
        if (wavelet == NULL) {
            wavelet = transform_array[0]->wavelet[2];
        }
        break;

    default:
        // Unknown resolution code
        assert(0);
        break;
    }

    // Compute the decoded frame dimensions
    assert(wavelet != NULL);
    if (wavelet == NULL)
    {
        // Defensive guard: report zero dimensions rather than dereferencing
        // a NULL wavelet when assertions are compiled out (release builds)
        if (decoded_width_out) {
            *decoded_width_out = 0;
        }
        if (decoded_height_out) {
            *decoded_height_out = 0;
        }
        return;
    }

    if (decoded_width_out) {
        *decoded_width_out = decoded_scale * wavelet->width;
    }
    if (decoded_height_out) {
        *decoded_height_out = decoded_scale * wavelet->height;
    }
}
// Reconstruct an uncompressed Bayer sample into the requested output format.
// The raw Bayer payload was stashed in decoder->uncompressed_chunk while the
// bitstream was parsed; this routine either unpacks it directly to a native
// Bayer output format (BYR2/BYR3/BYR4) or unpacks it into scratch buffers
// and demosaics it to the output via the worker thread pool.
// decoder:       owning decoder (scratch buffers, uncompressed chunk, threads)
// info:          output frame description (format, width, height, resolution)
// frame:         frame index (currently unused by this routine)
// output_buffer: destination buffer
// output_pitch:  destination row pitch in bytes
// Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_UNSUPPORTED_FORMAT for
// formats this path cannot produce, or CODEC_ERROR_MEMORY_ALLOC on
// allocation failure.
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int format = info->format;
    int width = info->width;
    int height = info->height;

    // Assume the output format is unsupported until matched below
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
    switch (format)
    {
    // Formats produced by the demosaic path further down
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_RG48: //DAN20090120 added not sure why they weren't here.
    case DECODED_FORMAT_RG64: //DAN20101207 added not sure why they weren't here.
    case DECODED_FORMAT_WP13: //DAN20090120 ""
    case DECODED_FORMAT_W13A: //DAN20101207 ""
    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_R210:
    case DECODED_FORMAT_DPX0:
    case DECODED_FORMAT_RG30:
    case DECODED_FORMAT_AR10:
    case DECODED_FORMAT_AB10:
    case DECODED_FORMAT_YR16:
    case DECODED_FORMAT_V210:
    case DECODED_FORMAT_YU64:
    case DECODED_FORMAT_YUYV: //?
    case DECODED_FORMAT_UYVY: //?
    case DECODED_FORMAT_R408:
    case DECODED_FORMAT_V408:
        error = CODEC_ERROR_OKAY;
        break;

    // Native Bayer outputs: unpack directly and return
    case DECODED_FORMAT_BYR2:
    case DECODED_FORMAT_BYR4:
        {
            unsigned short *curve = NULL;
            // Apply the linear restore curve only for BYR4 output with no
            // encode curve preset
            if(decoder->BYR4LinearRestore && decoder->frame.format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
            {
                curve = decoder->BYR4LinearRestore;
            }
            ConvertPackedToBYR2(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch, curve);
        }
        // The chunk has been consumed
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;

    case DECODED_FORMAT_BYR3:
        ConvertPackedToBYR3(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch);
        // The chunk has been consumed
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;
    }

    if(error)
        return error;

    // Need to allocate a scratch buffer for decoding the Bayer frame?
    if (decoder->RawBayer16 == NULL)
    {
        // Four Bayer data samples at each 2x2 quad in the grid
        int pixel_size = 4 * sizeof(PIXEL16U);
        int frame_size;
        const size_t alignment = 16;
#if _ALLOCATOR
        ALLOCATOR *allocator = decoder->allocator;
#endif
        frame_size = width * height * pixel_size;
#if _ALLOCATOR
        decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
        decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
        assert(decoder->RawBayer16 != NULL);
        if (! (decoder->RawBayer16 != NULL)) {
            return CODEC_ERROR_MEMORY_ALLOC;
        }
        decoder->RawBayerSize = frame_size;

        if(decoder->RGBFilterBuffer16 == NULL)
        {
            // Three 16-bit planes, or four when the encoded RGBA sample is
            // decoded to a format with an alpha channel
            int size = frame_size*3;
            if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
                size = frame_size*4;
#if _ALLOCATOR
            decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
            decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
            assert(decoder->RGBFilterBuffer16 != NULL);
            if (! (decoder->RGBFilterBuffer16 != NULL)) {
                return CODEC_ERROR_MEMORY_ALLOC;
            }
            // Record the actual allocated size (previously recorded as
            // frame_size*3, which understated the four-plane RGBA case)
            decoder->RGBFilterBufferSize = size;
        }
    }

    // Unpack the chunk into RawBayer16, using RGBFilterBuffer16 as scratch space
    ConvertPackedToRawBayer16(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, decoder->RawBayer16, decoder->RGBFilterBuffer16, info->resolution);
    // The chunk has been consumed
    decoder->uncompressed_chunk = 0;
    decoder->uncompressed_size = 0;

#if _THREADED
    // Demosaic the raw Bayer data to the output using the worker thread pool
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int inverted = false;
        uint8_t *output = output_buffer;
        int pitch = output_pitch;

#if _DELAY_THREAD_START
        // Create the worker thread pool on first use
        if(decoder->worker_thread.pool.thread_count == 0)
        {
            CreateLock(&decoder->worker_thread.lock);
            // Initialize the pool of transform worker threads
            ThreadPoolCreate(&decoder->worker_thread.pool,
                             decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                             WorkerThreadProc,
                             decoder);
        }
#endif
        // The packed RGB formats are written bottom-up: switch to the
        // inverted variants and flip the output location and pitch
        if (format == DECODED_FORMAT_RGB24)
        {
            format = DECODED_FORMAT_RGB24_INVERTED;
            inverted = true;
        }
        else if (format == DECODED_FORMAT_RGB32)
        {
            format = DECODED_FORMAT_RGB32_INVERTED;
            inverted = true;
        }

        // Have the output location and pitch been inverted?
        if (inverted && pitch > 0) {
            int height = info->height;
            if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
                height *= 2;
            output += (height - 1) * pitch; // Start at the bottom row
            pitch = NEG(pitch);             // Negate the pitch to go up
        }

        // Post a message to the mailbox
        mailbox->output = output;
        mailbox->pitch = pitch;
        memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
        mailbox->jobType = JOB_TYPE_OUTPUT;

        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
    }
#else
    // Without the threaded demosaic there is no output path for this format
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

    return error;
}
// Reconstruct uncompressed v210 YUV format to the requested output format
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
//CODEC_STATE *codec = &decoder->codec;
//int num_channels = codec->num_channels;
//int precision = codec->precision;
int format = info->format;
int width = info->width;
int height = info->height;
int resolution = info->resolution;
// Compute the number of bytes between each row of Bayer data
//int bayer_pitch = 2 * width * sizeof(PIXEL16U);
// Compute the pitch between pairs of rows of bayer data (one pair per image row)
//int raw_bayer_pitch = 2 * bayer_pitch;
//int chroma_offset = decoder->codec.chroma_offset;
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
if(format == DECODED_FORMAT_V210 && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
{
int smallest_Stride = output_pitch;
int unc_Stride = decoder->uncompressed_size / height;
if(unc_Stride < smallest_Stride)
smallest_Stride = unc_Stride;
if(unc_Stride == output_pitch)
memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
else
{
int y;
uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
uint8_t *dst = (uint8_t *)output_buffer;
for(y=0; y<height; y++)
{
memcpy(dst, src, smallest_Stride);
src += unc_Stride;
dst += output_pitch;
}
}
decoder->uncompressed_chunk = 0;
decoder->uncompressed_size = 0;
return CODEC_ERROR_OKAY;
}
if((format == DECODED_FORMAT_YUYV || format == DECODED_FORMAT_UYVY) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
{
int smallest_Stride = output_pitch;
int unc_Stride = decoder->uncompressed_size / height;
if(unc_Stride < smallest_Stride)
smallest_Stride = unc_Stride;
{
int y;
uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
uint8_t *dst = (uint8_t *)output_buffer;
for(y=0; y<height; y++)
{
uint32_t *input_ptr = (uint32_t *)src;
int pos = 0;
int column=0,length = width;
length -= length % 6; //DAN03252004 -- fix a memory overflow.
for (column=0; column < length; column += 6)
{
uint32_t yuv;
int y;
int u;
int v;
// Read the first word
yuv = *(input_ptr++);
u = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
v = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
// Expand the pixels to sixteen bits
u <<= 6;
y <<= 6;
v <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(u)>>8;
// Read the second word
yuv = *(input_ptr++);
y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(v)>>8;
u = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
u <<= 6;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(u)>>8;
// Read the third word
yuv = *(input_ptr++);
v = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
v <<= 6;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(v)>>8;
u = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
u <<= 6;
// Read the fourth word
yuv = *(input_ptr++);
y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(u)>>8;
v = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
v <<= 6;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(v)>>8;
}
if(format == DECODED_FORMAT_UYVY)
{
for (column=0; column < pos; column += 2)
{
int t = dst[column];
dst[column] = dst[column+1];
dst[column+1] = t;
}
}
src += unc_Stride;
dst += output_pitch;
}
}
decoder->uncompressed_chunk = 0;
decoder->uncompressed_size = 0;
return CODEC_ERROR_OKAY;
}
{
// Expand YUV at the target resolution, and use the ActiveMetadata engine.
// Need to allocate a scratch buffer for decoding the frame?
if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
{
//int pixel_size = 2 * sizeof(PIXEL16U);
const size_t alignment = 16;
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
#endif
int orig_width = width;
if(resolution == DECODED_RESOLUTION_HALF)
orig_width *= 2;
if(resolution == DECODED_RESOLUTION_QUARTER)
orig_width *= 4;
if(decoder->RawBayer16)
{
#if _ALLOCATOR
FreeAligned(allocator, decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = 0;
#else
MEMORY_ALIGNED_FREE(decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = 0;
#endif
}
#if _ALLOCATOR
decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
assert(decoder->RawBayer16 != NULL);
if (! (decoder->RawBayer16 != NULL)) {
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->RawBayerSize = orig_width * 64;
}
}
// unpack source original YUV into YU64?
if(decoder->RawBayer16)
{
//uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
//uint8_t *dst = (uint8_t *)output_buffer;
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output_buffer;
mailbox->pitch = output_pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
#else
{
int orig_width = width;
int orig_height = height;
int row,lines = 1;
int start,end;
if(resolution == DECODED_RESOLUTION_HALF)
{
orig_width *= 2;
orig_height *= 2;
lines = 2;
}
if(resolution == DECODED_RESOLUTION_QUARTER)
{
orig_width *= 4;
orig_height *= 4;
lines = 4;
}
start = 0;
end = height;
if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24)
{
start = height-1;
end = -1;
}
for (row = start; row != end; end > start ? row++ : row--)
{
int whitebitdepth = 16;
int flags = 0;
uint8_t *planar_output[3];
int planar_pitch[3];
ROI roi;
PIXEL16U *y_row_ptr;
PIXEL16U *u_row_ptr;
PIXEL16U *v_row_ptr;
PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
PIXEL16U *scanline2 = scanline + orig_width * 8;
unsigned short *sptr;
int i,unc_Stride = decoder->uncompressed_size / orig_height;
y_row_ptr = (PIXEL16U *)scanline;
u_row_ptr = y_row_ptr + orig_width;
v_row_ptr = u_row_ptr + orig_width/2;
for(i=0; i<lines; i++)
{
src = (uint8_t *)decoder->uncompressed_chunk;
src += row * unc_Stride;
// Repack the row of 10-bit pixels into 16-bit pixels
ConvertV210RowToYUV16((uint8_t *)src, y_row_ptr, u_row_ptr, v_row_ptr, orig_width, scanline2);
// Advance to the next rows in the input and output images
y_row_ptr += orig_width*2;
u_row_ptr = y_row_ptr + orig_width;
v_row_ptr = u_row_ptr + orig_width/2;
}
y_row_ptr = (PIXEL16U *)scanline;
u_row_ptr = y_row_ptr + width;
v_row_ptr = u_row_ptr + width/2;
if(lines == 2)
{
for(i=0; i<width*2;i++)
y_row_ptr[i] = (y_row_ptr[i*2] + y_row_ptr[i*2+1] + y_row_ptr[orig_width*2+i*2] + y_row_ptr[orig_width*2+i*2+1]) >> 2;
}
else if(lines == 4)
{
for(i=0; i<width*2;i++)
y_row_ptr[i] = (y_row_ptr[i*4] + y_row_ptr[i*4+2] + y_row_ptr[orig_width*2*2+i*4] + y_row_ptr[orig_width*2*2+i*4+2]) >> 2;
}
roi.width = width;
roi.height = 1;
planar_output[0] = (uint8_t *)y_row_ptr;
planar_output[1] = (uint8_t *)v_row_ptr;
planar_output[2] = (uint8_t *)u_row_ptr;
planar_pitch[0] = 0;
planar_pitch[1] = 0;
planar_pitch[2] = 0;
if(decoder->apply_color_active_metadata)
{
ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
(unsigned char *)scanline2, width, output_pitch,
COLOR_FORMAT_RGB_8PIXEL_PLANAR, decoder->frame.colorspace, &whitebitdepth, &flags);
sptr = scanline2;
sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline2, scanline,
info->format, &whitebitdepth, &flags);
}
else
{
ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
(unsigned char *)scanline2, width, output_pitch,
COLOR_FORMAT_WP13, decoder->frame.colorspace, &whitebitdepth, &flags);
sptr = scanline2;
}
ConvertLinesToOutput(decoder, width, 1, row, sptr,
dst, output_pitch, format, whitebitdepth, flags);
dst += output_pitch;
}
}
#endif
}
error = CODEC_ERROR_OKAY;
return error;
}
// Reconstruct uncompressed DPX0 RGB format to the requested output format.
//
// Fast path: full resolution, no active-metadata processing, and a 10-bit RGB
// output format -> the uncompressed chunk is copied (or repacked in place via
// ConvertDPX0ToRGB10) straight into the output buffer.
//
// Otherwise the 10-bit rows are unpacked into a scratch scanline (with 2x/4x
// box downsampling for half/quarter resolution), optionally run through the
// ActiveMetadata engine, and converted to the output format one row at a time.
//
// Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_MEMORY_ALLOC if the
// scratch buffer cannot be allocated.
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int format = info->format;
	int width = info->width;
	int height = info->height;
	int resolution = info->resolution;

	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	// Fast path: direct copy/repack of the uncompressed chunk
	if( (format == DECODED_FORMAT_DPX0 || format == DECODED_FORMAT_AR10 || format == DECODED_FORMAT_AB10 || format == DECODED_FORMAT_RG30 || format == DECODED_FORMAT_R210) &&
		resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
	{
		int smallest_Stride = output_pitch;
		int unc_Stride = decoder->uncompressed_size / height;
		if(unc_Stride < smallest_Stride)
			smallest_Stride = unc_Stride;

		// Repack the DPX0 payload in place when a different 10-bit packing was requested
		if(format != DECODED_FORMAT_DPX0)
		{
			int unc_Stride = decoder->uncompressed_size / height;
			ConvertDPX0ToRGB10((uint8_t *)decoder->uncompressed_chunk, unc_Stride, width, height, format);
		}

		if(unc_Stride == output_pitch)
			memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
		else
		{
			// Strides differ: copy the overlapping part of each row
			int y;
			uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
			uint8_t *dst = (uint8_t *)output_buffer;
			for(y=0; y<height; y++)
			{
				memcpy(dst, src, smallest_Stride);
				src += unc_Stride;
				dst += output_pitch;
			}
		}

		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
	}

	{
		// Expand RGB at the target resolution, and use the ActiveMetadata engine.
		// Need to allocate a scratch buffer for decoding the frame?
		if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
		{
			const size_t alignment = 16;
#if _ALLOCATOR
			ALLOCATOR *allocator = decoder->allocator;
#endif
			// The scratch buffer is sized for the source (pre-downsample) width
			int orig_width = width;
			if(resolution == DECODED_RESOLUTION_HALF)
				orig_width *= 2;
			if(resolution == DECODED_RESOLUTION_QUARTER)
				orig_width *= 4;

			// Release any previous (too small) scratch buffer
			if(decoder->RawBayer16)
			{
#if _ALLOCATOR
				FreeAligned(allocator, decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#else
				MEMORY_ALIGNED_FREE(decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#endif
			}

#if _ALLOCATOR
			decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
			decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
			assert(decoder->RawBayer16 != NULL);
			if (! (decoder->RawBayer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			decoder->RawBayerSize = orig_width * 64;
		}
	}

	// Unpack the original source rows and convert to the output format
	if(decoder->RawBayer16)
	{
#if _THREADED
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
			if(decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								WorkerThreadProc,
								decoder);
			}
#endif
			// Post a message to the mailbox
			mailbox->output = output_buffer;
			mailbox->pitch = output_pitch;
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);
			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
#else
		{
			int orig_width = width;
			int orig_height = height;
			int row,lines = 1;
			int start,end;
			// FIX: dst was used below but never declared in this branch
			// (the threaded build hid the compile error)
			uint8_t *dst = (uint8_t *)output_buffer;

			if(resolution == DECODED_RESOLUTION_HALF)
			{
				orig_width *= 2;
				orig_height *= 2;
				lines = 2;
			}
			if(resolution == DECODED_RESOLUTION_QUARTER)
			{
				orig_width *= 4;
				orig_height *= 4;
				lines = 4;
			}

			start = 0;
			end = height;
			if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) // Can this work, all the code below expects 10-bit
			{
				// Bottom-up output formats: walk the rows in reverse
				start = height-1;
				end = -1;
			}

			for (row = start; row != end; end > start ? row++ : row--)
			{
				int whitebitdepth = 13;
				int flags = 0;
				PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
				PIXEL16U *scanline2 = scanline + orig_width * 8;
				unsigned short *sptr;
				int i,unc_Stride = decoder->uncompressed_size / orig_height;

				if(decoder->apply_color_active_metadata)
					flags = ACTIVEMETADATA_SRC_8PIXEL_PLANAR;
				else
					flags = 0;

				if(lines == 1)
				{
					// Full resolution: unpack one row of big-endian 10-bit RGB
					uint16_t *sptr16;	// renamed: previously shadowed the outer sptr
					// FIX: was (uint32_t) -- pointer truncated on 64-bit builds
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2);
					sptr16 = (uint16_t *)lptr;
					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
						{
							if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
							{
								for(j=0; j<8; j++)
								{
									ptr[j] = sptr16[0] >> 3;
									ptr[j+8] = sptr16[1] >> 3;
									ptr[j+16] = sptr16[2] >> 3;
									sptr16 += 3;
								}
							}
							else
							{
								for(j=0; j<8; j++)
								{
									val = SwapInt32(*lptr++);
									val >>= 2;
									b = (val & 0x3ff) << 3;
									val >>= 10;
									g = (val & 0x3ff) << 3;
									val >>= 10;
									r = (val & 0x3ff) << 3;
									ptr[j] = r;
									ptr[j+8] = g;
									ptr[j+16] = b;
								}
							}
						}
						else
						{
							if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
							{
								for(j=0; j<8*3; j+=3)
								{
									ptr[j] = sptr16[0] >> 3;
									ptr[j+1] = sptr16[1] >> 3;
									ptr[j+2] = sptr16[2] >> 3;
									sptr16 += 3;
								}
							}
							else
							{
								for(j=0; j<8*3; j+=3)
								{
									val = SwapInt32(*lptr++);
									val >>= 2;
									b = (val & 0x3ff) << 3;
									val >>= 10;
									g = (val & 0x3ff) << 3;
									val >>= 10;
									r = (val & 0x3ff) << 3;
									ptr[j] = r;
									ptr[j+1] = g;
									ptr[j+2] = b;
								}
							}
						}
						ptr += 24;
					}
				}
				else if(lines == 2)
				{
					// Half resolution: average a 2x2 neighborhood of source pixels
					// FIX: was (uint32_t) -- pointer truncated on 64-bit builds
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2) * lines;
					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						for(j=0; j<8; j++)
						{
							val = SwapInt32(lptr[0]);
							val >>= 2;
							b = (val & 0x3ff) << 3;
							val >>= 10;
							g = (val & 0x3ff) << 3;
							val >>= 10;
							r = (val & 0x3ff) << 3;

							val = SwapInt32(lptr[1]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							val = SwapInt32(lptr[unc_Stride>>2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							val = SwapInt32(lptr[(unc_Stride>>2)+1]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
							{
								ptr[j] = r>>2;
								ptr[j+8] = g>>2;
								ptr[j+16] = b>>2;
							}
							else
							{
								ptr[j*3] = r>>2;
								ptr[j*3+1] = g>>2;
								ptr[j*3+2] = b>>2;
							}
							lptr += lines;
						}
						ptr += 24;
					}
				}
				else if(lines == 4)
				{
					// Quarter resolution: average four samples from a 4x4 neighborhood
					// FIX: was (uint32_t) -- pointer truncated on 64-bit builds
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2) * lines;
					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						for(j=0; j<8; j++)
						{
							val = SwapInt32(lptr[0]);
							val >>= 2;
							b = (val & 0x3ff) << 3;
							val >>= 10;
							g = (val & 0x3ff) << 3;
							val >>= 10;
							r = (val & 0x3ff) << 3;

							val = SwapInt32(lptr[2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							val = SwapInt32(lptr[unc_Stride>>1]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							val = SwapInt32(lptr[(unc_Stride>>1)+2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
							{
								ptr[j] = r>>2;
								ptr[j+8] = g>>2;
								ptr[j+16] = b>>2;
							}
							else
							{
								ptr[j*3] = r>>2;
								ptr[j*3+1] = g>>2;
								ptr[j*3+2] = b>>2;
							}
							lptr += lines;
						}
						ptr += 24;
					}
				}

				// Apply 3D LUTs / color corrections, then emit the row
				sptr = scanline;
				if(decoder->apply_color_active_metadata)
					sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline, scanline2,
						info->format, &whitebitdepth, &flags);

				ConvertLinesToOutput(decoder, width, 1, row, sptr,
					dst, output_pitch, format, whitebitdepth, flags);
				dst += output_pitch;
			}
		}
#endif
	}

	error = CODEC_ERROR_OKAY;
	return error;
}
// Reconstruct Bayer format to the requested output format.
// Dispatches on the decoded resolution; only the demosaic resolutions are
// implemented, every other recognized resolution reports an unsupported format.
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR result = CODEC_ERROR_UNSUPPORTED_FORMAT;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	// Switch to the subroutine for the requested resolution
	switch (info->resolution)
	{
	case DECODED_RESOLUTION_FULL_DEBAYER:
	case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
		// Demosaic to (near) full resolution
		result = ReconstructSampleFrameDeBayerFullToBuffer(decoder, info, frame, output, pitch);
		break;

	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF_NODEBAYER:
	case DECODED_RESOLUTION_HALF:
	case DECODED_RESOLUTION_QUARTER:
	case DECODED_RESOLUTION_LOWPASS_ONLY:
		// Recognized resolutions without an implemented reconstruction path
		break;

	default:
		// The decoded resolution is not supported by this routine
		assert(0);
		break;
	}

	return result;
}
// Reconstruct Bayer encoded data to full resolution.
//
// Currently only performs the scratch-buffer allocation; every output format
// in the switch below still returns CODEC_ERROR_UNSUPPORTED_FORMAT because
// the Bayer-to-RGB conversions have not been implemented.
//
// Returns CODEC_ERROR_MEMORY_ALLOC on allocation failure,
// CODEC_ERROR_UNSUPPORTED_FORMAT otherwise.
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int format = info->format;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}

		frame_size = decoded_width * decoded_height * pixel_size;
#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

		if(decoder->RGBFilterBuffer16 == NULL)
		{
			// RGBA sources need a fourth plane for the alpha channel
			int size = frame_size*3;
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// FIX: record the actual allocation size (previously hard-coded
			// frame_size*3, understating the buffer in the RGBA_4444 case)
			decoder->RGBFilterBufferSize = size;
		}
	}

	//TODO: Need to add more output formats to this routine
	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		// Direct Bayer-to-RGB32 conversion is not implemented yet
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	case DECODED_FORMAT_RGB24:
		// Direct Bayer-to-RGB24 conversion is not implemented yet
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	default:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}
// Reconstruct Bayer encoded data and demosaic to full resolution.
//
// Validates the output format, lazily allocates the Bayer scratch buffers,
// inverts the spatial transform into rows of 16-bit Bayer components, and
// hands the demosaic/output work to the worker-thread pool.  The non-threaded
// build does not implement this path and reports an unsupported format.
//
// Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_UNSUPPORTED_FORMAT for
// formats outside the whitelist, CODEC_ERROR_MEMORY_ALLOC on allocation failure.
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int precision = codec->precision;
	int format = info->format;
	int width = info->width;
	// Compute the number of bytes between each row of Bayer data
	int bayer_pitch = 2 * width * sizeof(PIXEL16U);
	int chroma_offset = decoder->codec.chroma_offset;

	// Only the formats below are supported by this routine
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RG48: //DAN20090120 added not sure why they weren't here.
	case DECODED_FORMAT_WP13: //DAN20090120 ""
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_YR16:
	case DECODED_FORMAT_V210:
	case DECODED_FORMAT_YU64:
		error = CODEC_ERROR_OKAY;
		break;
	}
	if(error)
		return error;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}

		frame_size = decoded_width * decoded_height * pixel_size;
#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

		if(decoder->RGBFilterBuffer16 == NULL)
		{
			// RGBA sources need a fourth plane for the alpha channel
			int size = frame_size*3;
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// FIX: record the actual allocation size (previously hard-coded
			// frame_size*3, understating the buffer in the RGBA_4444 case)
			decoder->RGBFilterBufferSize = size;
		}
	}

#if _THREADED
	// Invert the last spatial transform into rows of 16-bit Bayer components.
	// NOTE(review): bayer_pitch is already a byte count, so the extra
	// *sizeof(PIXEL) factor looks suspicious -- confirm against the consumer
	// before changing it; left as-is to preserve behavior.
	TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
									(uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL),
									info, chroma_offset, precision);
	//DemosaicRAW
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int inverted = false;
		uint8_t *output = output_buffer;
		int pitch = output_pitch;
#if _DELAY_THREAD_START
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							WorkerThreadProc,
							decoder);
		}
#endif
		// RGB24/RGB32 are bottom-up formats: decode into the inverted variants
		if (format == DECODED_FORMAT_RGB24)
		{
			format = DECODED_FORMAT_RGB24_INVERTED;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32)
		{
			format = DECODED_FORMAT_RGB32_INVERTED;
			inverted = true;
		}

		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0) {
			int height = info->height;
			if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				height *= 2;
			output += (height - 1) * pitch;	// Start at the bottom row
			pitch = NEG(pitch);				// Negate the pitch to go up
		}

		// Post a message to the mailbox
		mailbox->output = output;
		mailbox->pitch = pitch;
		memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
		mailbox->jobType = JOB_TYPE_OUTPUT;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	// The single-threaded demosaic path has not been implemented
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

	return error;
}
// Reconstruct Bayer encoded data to half resolution.
// The lowpass band of each channel's wavelet already holds the half-resolution
// Bayer planes, so this routine just hands those planes to the planar
// Bayer-to-RGB32 converter.  Only DECODED_FORMAT_RGB32 is supported.
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	TRANSFORM **transforms = decoder->transform;
	int frame_width = info->width;
	int frame_height = info->height;
	int format = info->format;

	// Lowpass band of each channel's wavelet corresponding to the output frame
	PIXEL16U *g1_plane = (PIXEL16U *)transforms[0]->wavelet[frame]->band[0];
	PIXEL16U *rg_plane = (PIXEL16U *)transforms[1]->wavelet[frame]->band[0];
	PIXEL16U *bg_plane = (PIXEL16U *)transforms[2]->wavelet[frame]->band[0];
	int g1_pitch = transforms[0]->wavelet[frame]->pitch;
	int rg_pitch = transforms[1]->wavelet[frame]->pitch;
	int bg_pitch = transforms[2]->wavelet[frame]->pitch;

	// At half resolution the G1-G2 difference channel may not have been decoded //HACK
	PIXEL16U *g2_plane = NULL;
	int g2_pitch = 0;
	if(transforms[3]->wavelet[frame])
	{
		g2_plane = (PIXEL16U *)transforms[3]->wavelet[frame]->band[0];
		g2_pitch = transforms[3]->wavelet[frame]->pitch;
	}

	if (format == DECODED_FORMAT_RGB32)
	{
		ConvertPlanarBayerToRGB32(g1_plane, g1_pitch, rg_plane, rg_pitch,
								  bg_plane, bg_pitch, g2_plane, g2_pitch,
								  output_buffer, output_pitch,
								  frame_width, frame_height);
	}
	else
	{
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
	}

	return error;
}
// Reconstruct Bayer encoded data to quarter resolution.
// Unimplemented stub: asserts in debug builds and reports success otherwise.
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	//TODO: Need to finish this routine
	assert(0);

	return error;
}
// Reconstruct the original YUV 4:2:2 encoded format to the requested output format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
FRAME_INFO *info = &decoder->frame;
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
int progressive = codec->progressive;
int precision = codec->precision;
TRANSFORM **transform_array = decoder->transform;
//int decoded_width = 0;
//int decoded_height = 0;
int resolution = info->resolution;
int format = info->format;
//int color_space = decoder->frame.colorspace;
//TODO: Eliminate use of the chroma offset
int chroma_offset = decoder->codec.chroma_offset;
#if _THREADED
// Type of threaded inverse transform
//int type;
#endif
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
#endif
if (decoder == NULL) {
return CODEC_ERROR_INVALID_ARGUMENT;
}
//TODO: Split this routine into subroutines for progressive versus interlaced video
//TODO: Split progressive and interlaced routines into subroutines for each resolution
if(resolution == DECODED_RESOLUTION_HALF)
{
bool inverted = false;
FRAME_INFO info2;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
#if 1
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
#endif
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->framenum = frame;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 1;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
return CODEC_ERROR_OKAY;
#endif
}
else
{
int precision = codec->precision;
TRANSFORM **transform_array = decoder->transform;
int channel;
IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
}
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, &info2, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
}
return CODEC_ERROR_OKAY;
}
// Was the video source interlaced or progressive?
if (progressive)
{
// The video source was progressive (the first transform was a spatial transform)
if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
FRAME_INFO info2;
int format;
bool inverted = false;
int precision = codec->precision;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
#if 1
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
#endif
/*if(decoder->use_active_metadata_decoder)
{
switch (format & 0x7ffffff)
{
case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for
case DECODED_FORMAT_YUYV: // computing the active metadata.
case DECODED_FORMAT_UYVY:
return CODEC_ERROR_OKAY;
break;
}
}*/
switch (format & 0x7ffffff)
{
case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
#endif
}
else
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sYUVtoRGB);
return CODEC_ERROR_OKAY;
#endif
}
break;
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
#endif
}
else
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToYUV);
return CODEC_ERROR_OKAY;
#endif
}
break;
//Handle sizes that are smaller than the interim decode buffer //DAN20081222
case DECODED_FORMAT_CbYCrY_10bit_2_8:
decoder->upper_plane = output;
decoder->lower_plane = output + decoder->frame.width * decoder->frame.height / 2;
// Use the address and pitch of the lower plane
output = decoder->lower_plane;
pitch = decoder->frame.width * 2;
// Fall through and compute the inverse spatial transform
case DECODED_FORMAT_CbYCrY_16bit_2_14:
case DECODED_FORMAT_CbYCrY_16bit_10_6:
case DECODED_FORMAT_CbYCrY_8bit:
case DECODED_FORMAT_CbYCrY_16bit:
if(decoder->use_active_metadata_decoder)
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToOutput);
return CODEC_ERROR_OKAY;
}
break;
case DECODED_FORMAT_V210:
if(decoder->use_active_metadata_decoder)
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalYUVStrip16sToYUVOutput);
return CODEC_ERROR_OKAY;
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB32_INVERTED:
// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_R408:
case DECODED_FORMAT_V408:
case DECODED_FORMAT_YU64:
case DECODED_FORMAT_YR16:
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_W13A:
if((format & 0x7FFFFFFF) == DECODED_FORMAT_RGB32 && decoder->use_active_metadata_decoder == false)
{
#if _THREADED
TransformInverseSpatialThreadedYUV422ToBuffer(decoder,
frame, num_channels, output, pitch,
&info2, chroma_offset, precision);
#elif 0
TransformInverseSpatialToBuffer(decoder, transform_array, frame,
num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#else
TransformInverseSpatialYUV422ToOutput(decoder, transform_array,
frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision,
InvertHorizontalStripYUV16sToPackedRGB32);
#endif
return CODEC_ERROR_OKAY;
}
#if _THREADED
if(decoder->use_active_metadata_decoder)
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
}
else
{
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame,
num_channels, output, pitch,
&info2, chroma_offset, precision);
ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
&info2, chroma_offset, precision);
return CODEC_ERROR_OKAY;
}
#endif
break;
default:
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
#endif
}
// else Return the error code for unsupported output format
break;
}
}
}
else
{
// The video source was interlaced (the first transform was a frame transform)
if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
bool inverted = false;
if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) {
// info->format = DECODED_FORMAT_RGB32_INVERTED; //DAN20080702 vertically flips QT decodes if active.
inverted = true;
}
#if 1
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
#endif
switch (format & 0x7ffffff)
{
case DECODED_FORMAT_NV12:
case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
case DECODED_FORMAT_V210: // only supported with use_active_metadata_decoder
if(decoder->use_active_metadata_decoder)
{
int frame_size = info->width * info->height * 4;
if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size)
{
#if _ALLOCATOR
if(decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
}
decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
if(decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
}
decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
assert(decoder->RGBFilterBuffer16 != NULL);
if (! (decoder->RGBFilterBuffer16 != NULL)) {
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->RGBFilterBufferSize = frame_size;
}
//TransformInverseSpatialUniversalThreadedToRow16u(
// decoder, frame, num_channels,
// (uint8_t *)decoder->RGBFilterBuffer16, info->width * 3 * 2,
// info, chroma_offset, precision);
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)decoder->RGBFilterBuffer16,
info->width * 4,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)decoder->RGBFilterBuffer16,
info->width * 4, info,
&decoder->scratch, chroma_offset, precision);
#endif
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 2; // yuv
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
}
#endif
return CODEC_ERROR_OKAY;
}
}
switch (format)
{
// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
case DECODED_FORMAT_WP13: //DAN20110203 - missing
case DECODED_FORMAT_W13A: //DAN20110203 - missing
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_RGB32: //32-bit format can fit the interim YR16 decode into
case DECODED_FORMAT_R408: //the output buffer
case DECODED_FORMAT_V408:
case DECODED_FORMAT_YU64:
case DECODED_FORMAT_YR16:
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)output, pitch,
info, chroma_offset, precision);
ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
//Old code converts 4:2:2 directly to RGBA (single threaded.)
//TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
// info, &decoder->scratch, chroma_offset, precision);
#endif
return CODEC_ERROR_OKAY;
default:
// else Return the error code for unsupported output format
break;
}
}
}
// The output format is not supported by this routine
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
return error;
}
// Routines for converting the new encoded formats to the requested output format
// Reconstruct a decoded RGB 4:4:4 sample frame into the caller's buffer.
//
// Chooses the reconstruction path from the decoded resolution
// (lowpass-only, quarter, half, or full/half-horizontal) and from the
// requested output pixel format in decoder->frame.
//
// Parameters:
//   decoder - decoder instance holding the transforms and codec state
//   frame   - index of the frame within the group of frames
//   output  - destination buffer (may be NULL to decode without output)
//   pitch   - bytes per output row (negated internally for inverted formats)
//
// Returns CODEC_ERROR_OKAY on success, or an error code for an invalid
// argument, unsupported resolution, undersized frame, allocation failure,
// or unsupported output format.
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = NULL;
#endif
	FRAME_INFO *info = NULL;
	CODEC_STATE *codec = NULL;
	int num_channels = 0;
	//int progressive = codec->progressive;
	TRANSFORM **transform_array = NULL;
	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
	//IMAGE *wavelet;
	//int wavelet_width;
	//int wavelet_height;
	int decoded_width = 0;
	int decoded_height = 0;
	int resolution = 0;
	//int chroma_offset = decoder->codec.chroma_offset;
	//int decoded_scale;
#if _ALLOCATOR
	ALLOCATOR *allocator = NULL;
#endif

	//TODO: Eliminate use of the chroma offset

	// Validate the decoder argument before any member access.
	// (This check previously ran only after the decoder had already been
	// dereferenced to initialize the locals above, making it ineffective.)
	if (decoder == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

#if (1 && DEBUG)
	logfile = decoder->logfile;
#endif
	info = &decoder->frame;
	codec = &decoder->codec;
	num_channels = codec->num_channels;
	transform_array = decoder->transform;
	resolution = info->resolution;
#if _ALLOCATOR
	allocator = decoder->allocator;
#endif

	// This routine should only be called for progressive frames
	assert(codec->progressive);

	// The decoder can decode a video sample without returning a frame
	if (output == NULL || pitch == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Does this frame have to be reconstructed?
	if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Check that the requested frame is within the limits of the group of frames
	assert(0 <= frame && frame < decoder->gop_length);

	// Check that the frame resolution is valid
	assert(IsValidFrameResolution(resolution));
	if (!IsValidFrameResolution(resolution)) {
		return CODEC_ERROR_RESOLUTION;
	}

	// Compute the decoded width and height
	ComputeOutputDimensions(decoder, frame, &decoded_width, &decoded_height);
	assert(decoded_width > 0 && decoded_height > 0);

	// RGB24/RGB32 output is bottom-up: start at the last row and negate the pitch
	if (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
	{
		output += (info->height-1)*pitch;
		pitch = -pitch;
	}

#if (0 && DEBUG)
	if (logfile) {
		IMAGE *wavelet = transform[0]->wavelet[frame];
		int band = 0;
		fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
		DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
	}
#endif

	// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
	//if (! (info->width >= decoded_width))
	{
		if (logfile) {
			//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
			fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
		}
	}
#endif
	assert(info->width >= decoded_width);
	if (!(info->width >= decoded_width)) {
		return CODEC_ERROR_FRAMESIZE;
	}

//	assert((info->height+7)/8 >= (decoded_height+7)/8);
//	if (!(info->height+7)/8 >= (decoded_height+7)/8) {
//		return CODEC_ERROR_FRAMESIZE;
//	}

	START(tk_convert);

	if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
	{
		//int precision = codec->precision;
		int scale = 13;
		int channel;
		IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
		int chroma_offset = decoder->codec.chroma_offset;

		//DAN20081203 -- fix for 444 decodes in AE32-bit float
		decoder->frame.white_point = 16;
		//decoder->frame.signed_pixels = 0;

		// Collect the lowpass wavelet from each channel; a missing wavelet at
		// index 5 indicates intra-frame compression, which uses index 2 and a
		// smaller scale.
		for (channel = 0; channel < num_channels; channel++)
		{
			lowpass_images[channel] = transform_array[channel]->wavelet[5];
			if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed.
			{
				scale = 12;
				lowpass_images[channel] = transform_array[channel]->wavelet[2];
			}
		}

		CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
			scale, decoder->codec.encoded_format, decoder->frame.white_point);
	}
	else
	// Quarter resolution
	if (resolution == DECODED_RESOLUTION_QUARTER)
	{
		// Output quarter resolution for the two frame GOP
		int precision = codec->precision;

		// Reconstruct the frame to quarter resolution
		ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
			info, &decoder->scratch, precision);

		// Quarter resolution one frame GOP is handled in DecodeSampleIntraFrame
	}
	else
	// Half resolution
	if (resolution == DECODED_RESOLUTION_HALF)
	{
		IMAGE *wavelet_array[TRANSFORM_MAX_CHANNELS];
		int precision = codec->precision;
		int chroma_offset = 0;
		int channel;

		if(decoder->use_active_metadata_decoder)
		{
#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
				if(decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);
					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
						decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						WorkerThreadProc,
						decoder);
				}
#endif
				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				mailbox->framenum = frame;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;
				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Get the first level wavelet in each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				wavelet_array[channel] = transform_array[channel]->wavelet[frame];
			}

			// Pack the pixels from the lowpass band in each channel into the output buffer
			CopyLowpassRGB444ToBuffer(decoder, wavelet_array, num_channels, output, pitch,
				info, chroma_offset, precision);
		}
	}
	// Full resolution or half horizontal
	else
	{
		int chroma_offset = 0;
		int precision = codec->precision;

		// Reconstruct the output frame from a full resolution decode
		//assert(resolution == DECODED_RESOLUTION_FULL);

		if(decoder->use_active_metadata_decoder)
		{
			int frame_size, channels = 3;

			// Carry the alpha channel when the source is RGBA 4:4:4:4 and the
			// output format wants alpha
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				channels = 4;

			frame_size = info->width * info->height * channels * 2;

			// (Re)allocate the intermediate 16-bit filter buffer if it is
			// missing or too small for this frame
			if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size)
			{
#if _ALLOCATOR
				if(decoder->RGBFilterBuffer16)
				{
					FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
				if(decoder->RGBFilterBuffer16)
				{
					MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
				assert(decoder->RGBFilterBuffer16 != NULL);
				if (! (decoder->RGBFilterBuffer16 != NULL)) {
					return CODEC_ERROR_MEMORY_ALLOC;
				}
				decoder->RGBFilterBufferSize = frame_size;
			}

#if _THREADED
			TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
				(uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
				info, chroma_offset, precision);
#else
			// Decode that last transform to rows of Bayer data (one row per channel)
			TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
				(uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
				info, &decoder->scratch, chroma_offset, precision);
#endif

#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
				if(decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);
					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
						decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						WorkerThreadProc,
						decoder);
				}
#endif
				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;
				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Select the conversion for the requested output format
			switch (info->format)
			{
			case DECODED_FORMAT_B64A:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2B64A);
#else
				TransformInverseRGB444ToB64A(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YU64: //TODO : Threading
				TransformInverseRGB444ToYU64(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RGB24:
			case DECODED_FORMAT_RGB24_INVERTED:
			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED://TODO, needs to be threaded. WIP
				TransformInverseRGB444ToRGB32(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64: //TODO, needs to be threaded. WIP
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2RG30);
#else
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2YUV);
#else
				TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision,
					InvertHorizontalStripRGB16sToPackedYUV8u);
#endif
				break;

			case DECODED_FORMAT_R408:
			case DECODED_FORMAT_V408:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGBA2YUVA);
#else
				assert(0);
#endif
				break;

			case DECODED_FORMAT_YR16:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2YR16);
#else
				assert(0);// missing non-threaded version
#endif
				break;

			case DECODED_FORMAT_V210:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2v210);
#else
				assert(0);// missing non-threaded version
#endif
				break;

			case DECODED_FORMAT_CbYCrY_8bit: // DECODED_FORMAT_CT_UCHAR
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2YUV);
#else
				assert(0);// missing non-threaded version
#endif
				break;

			//TODO: Add code to handle other Avid pixel formats
			case DECODED_FORMAT_CbYCrY_16bit: // DECODED_FORMAT_CT_SHORT
			case DECODED_FORMAT_CbYCrY_10bit_2_8: // DECODED_FORMAT_CT_10Bit_2_8
			case DECODED_FORMAT_CbYCrY_16bit_2_14: // DECODED_FORMAT_CT_SHORT_2_14
			case DECODED_FORMAT_CbYCrY_16bit_10_6: // DECODED_FORMAT_CT_USHORT_10_6
				assert(0);
				break;

			default:
#if (1 && DEBUG)
				if (logfile) {
					fprintf(logfile, "Invalid decoded format: %d\n", info->format);
				}
#endif
				assert(0);
				error = CODEC_ERROR_INVALID_FORMAT;
				break;
			}
		}
	}

	STOP(tk_convert);

	return error;
}
// Convert 16-bit signed lowpass data into the requested output format.
//
// image_array[] holds one lowpass wavelet per decoded channel, stored in
// G, R, B (and optionally A) order.  Pixels are scaled to 16 bits by a
// left shift of (16 - precision - PRESCALE_LUMA) and packed into
// output_buffer in the format given by info->format.  chroma_offset is
// accepted for interface consistency but is not read by this routine.
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
							   uint8_t *output_buffer, int32_t output_pitch,
							   FRAME_INFO *info, int chroma_offset,
							   int precision)
{
	bool inverted = false;
	int output_width = info->width;
	int output_height = info->height;
	int format = info->format;

	// Left shift to scale the pixels to 16 bits minus the shift already in the lowpass values
	const int shift = 16 - precision - PRESCALE_LUMA;

	START(tk_convert);

#if 0
	// Fill the output buffer with blank values
	EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
		inverted = true;
		// fall through -- the non-inverted variants flag inversion, then share
		// the common RGB conversion path below
	case DECODED_FORMAT_RGB24_INVERTED:
	case DECODED_FORMAT_RGB32_INVERTED:
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_RG48:
	case DECODED_FORMAT_RG64: //WIP
		ConvertLowpassRGB444ToRGB(image_array, output_buffer, output_width, output_height,
			output_pitch, format, inverted, shift, num_channels);
		break;

	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
		{
			// Decoded channels are stored G, R, B in the wavelet array
			IMAGE *g_image = image_array[0];
			IMAGE *r_image = image_array[1];
			IMAGE *b_image = image_array[2];

			// NOTE(review): the comparisons below use COLOR_FORMAT_* constants
			// while the case labels use DECODED_FORMAT_* -- presumably the
			// enum values alias; confirm against the format definitions.
			if (info->format == COLOR_FORMAT_YUYV)
			{
				ConvertRGB2YUV(r_image->band[0], g_image->band[0], b_image->band[0],
					r_image->pitch, g_image->pitch, b_image->pitch,
					output_buffer, output_pitch,
					output_width, output_height, 14,
					info->colorspace, info->format);
			}
			else if (info->format == COLOR_FORMAT_UYVY)
			{
				ConvertRGB2UYVY(r_image->band[0], g_image->band[0], b_image->band[0],
					r_image->pitch, g_image->pitch, b_image->pitch,
					output_buffer, output_pitch,
					output_width, output_height, 14,
					info->colorspace, info->format);
			}
		}
		break;

	default:
		// Generic path: copy each row into a planar scanline and let the
		// active-metadata conversion routines produce the output format.
		{
			int y;
			IMAGE *g_image = image_array[0];
			IMAGE *r_image = image_array[1];
			IMAGE *b_image = image_array[2];
			IMAGE *a_image = image_array[3]; // only dereferenced on the RGBA path below

			// Scanline work area borrowed from the decoder scratch space
			// (assumes enough free space for up to four planes per row -- TODO confirm)
			unsigned short *scanline = (unsigned short *)decoder->scratch.free_ptr;
			//unsigned short *scanline2 = scanline + output_width*3;
			uint8_t *newline = (uint8_t *)output_buffer;
			unsigned short *Rptr,*Gptr,*Bptr,*Aptr = NULL;

			Rptr = (unsigned short *)r_image->band[0];
			Gptr = (unsigned short *)g_image->band[0];
			Bptr = (unsigned short *)b_image->band[0];

			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
			{
				// Four-plane (RGBA) conversion, one output row at a time
				Aptr = (unsigned short *)a_image->band[0];
				for(y=0; y<output_height; y++)
				{
					int flags = (ACTIVEMETADATA_PLANAR);
					int whitebitdepth = 14;

					memcpy(scanline, Rptr, info->width*2);
					memcpy(scanline+info->width, Gptr, info->width*2);
					memcpy(scanline+info->width*2, Bptr, info->width*2);
					memcpy(scanline+info->width*3, Aptr, info->width*2);

					// Advance each source plane by one row (pitch is in bytes)
					Rptr += r_image->pitch/2;
					Gptr += g_image->pitch/2;
					Bptr += b_image->pitch/2;
					Aptr += a_image->pitch/2;

					Convert4444LinesToOutput(decoder, info->width, 1, y, scanline,
						newline, output_pitch, info->format, whitebitdepth, flags);

					newline += output_pitch;
				}
			}
			else
			{
				// Three-plane (RGB) conversion, one output row at a time
				for(y=0; y<output_height; y++)
				{
					int flags = (ACTIVEMETADATA_PLANAR);
					int whitebitdepth = 14;

					memcpy(scanline, Rptr, info->width*2);
					memcpy(scanline+info->width, Gptr, info->width*2);
					memcpy(scanline+info->width*2, Bptr, info->width*2);

					// Advance each source plane by one row (pitch is in bytes)
					Rptr += r_image->pitch/2;
					Gptr += g_image->pitch/2;
					Bptr += b_image->pitch/2;

					ConvertLinesToOutput(decoder, info->width, 1, y, scanline,
						newline, output_pitch, info->format, whitebitdepth, flags);

					newline += output_pitch;
				}
			}
		}
		//assert(0);
		break;
	}

	STOP(tk_convert);
}
#if _THREADED
// Threaded inverse spatial transform (new threads API) that converts the
// decoded YUV 4:2:2 rows into a packed RGB32 output buffer.
void TransformInverseSpatialThreadedYUV422ToBuffer(DECODER *decoder, int frame_index, int num_channels,
												   uint8_t *output, int pitch, FRAME_INFO *info,
												   int chroma_offset, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//TODO: Add support for more output formats
	int target_format = DECODED_FORMAT_RGB32;

	// Rows shared between the upper and lower spatial transforms
	int shared_rows = (((info->height + 7) / 8) * 8) / 2;

	// Message block used to hand the job parameters to the pool threads
	WORKER_THREAD_DATA *msg = &decoder->worker_thread.data;

	// Inverse horizontal filter that produces the desired output format
	HorizontalInverseFilterOutputProc filter_proc;

#if _DELAY_THREAD_START
	// Lazily create the transform worker thread pool on first use
	if (decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		ThreadPoolCreate(&decoder->worker_thread.pool,
						 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						 WorkerThreadProc,
						 decoder);
	}
#endif

	// Select the inverse horizontal filter for the output format
	if (target_format == DECODED_FORMAT_RGB32)
	{
		filter_proc = InvertHorizontalStripYUV16sToPackedRGB32;
	}
	else
	{
		assert(0);
		return;
	}

	// Fill in the mailbox message for the workers
	msg->horizontal_filter_proc = filter_proc;
	msg->frame = frame_index;
	msg->num_channels = num_channels;
	msg->output = output;
	msg->pitch = pitch;
	memcpy(&msg->info, info, sizeof(FRAME_INFO));
	msg->chroma_offset = chroma_offset;
	msg->precision = precision;
	msg->jobType = JOB_TYPE_WAVELET;

	// One work unit per row, then launch the pool and block until done
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, shared_rows);
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile, "All worker threads signalled done\n");
	}
#endif
}
// Threaded inverse transform using the new threads API.
// Converts RGB, RGBA, or BAYER (4 channel) data to a 16-bit planar format.
void TransformInverseSpatialUniversalThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
													  uint8_t *output, int pitch, FRAME_INFO *info,
													  int chroma_offset, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	// Rows shared by the upper and lower spatial transforms
	int row_count = (((info->height + 7) / 8) * 8) / 2;

	// Message block used to pass the job parameters to the pool
	WORKER_THREAD_DATA *msg = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
	// Lazily create the transform worker thread pool on first use
	if (decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		ThreadPoolCreate(&decoder->worker_thread.pool,
						 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						 WorkerThreadProc,
						 decoder);
	}
#endif

	// Describe the job: produce planar 16-bit rows via the universal filter
	msg->horizontal_filter_proc = InvertHorizontalStrip16sToRow16uPlanar;
	msg->frame = frame_index;
	msg->num_channels = num_channels;
	msg->output = output;
	msg->pitch = pitch;
	memcpy(&msg->info, info, sizeof(FRAME_INFO));
	msg->chroma_offset = chroma_offset;
	msg->precision = precision;
	msg->jobType = JOB_TYPE_WAVELET;

	// One work unit per row, then start the pool and wait for completion
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, row_count);
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Threaded inverse transform using the new threads API.
// Runs the inverse spatial transform and delivers the rows directly to the
// output buffer through the caller-supplied horizontal filter.
void TransformInverseSpatialUniversalThreadedToOutput(
	DECODER *decoder, int frame_index, int num_channels,
	uint8_t *output, int pitch, FRAME_INFO *info,
	int chroma_offset, int precision,
	HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	// The upper and lower spatial transforms only share the middle rows
	int work_rows = (((info->height + 7) / 8) * 8) / 2;

	// Message block used to pass the job parameters to the worker threads
	WORKER_THREAD_DATA *job = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
	// Create the worker pool the first time it is needed
	if (decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		ThreadPoolCreate(&decoder->worker_thread.pool,
						 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						 WorkerThreadProc,
						 decoder);
	}
#endif

	// Hand the job parameters to the worker threads
	job->horizontal_filter_proc = horizontal_filter_proc;
	job->frame = frame_index;
	job->num_channels = num_channels;
	job->output = output;
	job->pitch = pitch;
	memcpy(&job->info, info, sizeof(FRAME_INFO));
	job->chroma_offset = chroma_offset;
	job->precision = precision;
	job->jobType = JOB_TYPE_WAVELET;

	// Dispatch one work unit per row and wait until every thread is done
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, work_rows);
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Routines for the worker threads that use the new threads API
/*!
    @brief Worker-thread section of the threaded inverse spatial transform
    that reconstructs rows of the output frame directly from the wavelet
    bands and applies the supplied inverse horizontal filter to produce the
    final output format.

    Each worker thread calls this routine with its own thread_index.  The
    scratch buffer is partitioned evenly between the threads; the top/bottom
    border rows are handled by the designated top/bottom worker threads,
    and the middle rows are pulled from the thread pool's work queue until
    no work remains.

    @param decoder        decoder state (transforms, scratch space, thread pool)
    @param thread_index   index of the calling worker thread
    @param frame_index    which wavelet frame of each transform to invert
    @param num_channels   number of color channels to process
    @param output_buffer  base address of the decoded output frame
    @param output_pitch   pitch (bytes) of the output frame rows
    @param info           output frame dimensions, format and colorspace
    @param chroma_offset  NOTE(review): not referenced in this routine's body
    @param precision      source pixel precision passed to the row filters
    @param horizontal_filter_proc  inverse horizontal filter (must be non-NULL)
*/
void TransformInverseSpatialSectionToOutput(DECODER *decoder, int thread_index,
        int frame_index, int num_channels,
        uint8_t *output_buffer, int output_pitch, FRAME_INFO *info,
        int chroma_offset, int precision,
        HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    TRANSFORM **transform = decoder->transform;
    const SCRATCH *scratch = &decoder->scratch;

    // Per-channel wavelet band pointers and pitches
    PIXEL *lowlow_band[CODEC_MAX_CHANNELS];
    PIXEL *lowhigh_band[CODEC_MAX_CHANNELS];
    PIXEL *highlow_band[CODEC_MAX_CHANNELS];
    PIXEL *highhigh_band[CODEC_MAX_CHANNELS];
    int lowlow_pitch[CODEC_MAX_CHANNELS];
    int lowhigh_pitch[CODEC_MAX_CHANNELS];
    int highlow_pitch[CODEC_MAX_CHANNELS];
    int highhigh_pitch[CODEC_MAX_CHANNELS];
    int channel_width[CODEC_MAX_CHANNELS];

    uint8_t *output_row_ptr;
    uint8_t *plane_array[TRANSFORM_MAX_CHANNELS];
    int plane_pitch[TRANSFORM_MAX_CHANNELS];
    int output_width = info->width;
    int output_height = info->height;
    int half_height = output_height/2;
    int luma_band_width;            // NOTE(review): assigned below but never read here
    ROI strip;
    char *bufptr;
    int last_row;                   // wavelet row count (taken from the luma channel)
    int last_display_row;           // displayed row pairs, rounded up for odd heights
    int last_line;                  // last middle row index (exclusive bound below)
    int channel;
    int row;
    int odd_display_lines = 0;      // nonzero if the display height is odd
    THREAD_ERROR error;

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
    size_t buffer_size = scratch->free_size;

    //TODO: Replace uses of buffer variables with calls to the scratch space API

    // This version is for 16-bit pixels
    assert(sizeof(PIXEL) == 2);

    // Must have a valid inverse horizontal filter
    assert(horizontal_filter_proc != NULL);

    // Check for enough space in the local array allocations
    // assert(num_channels <= CODEC_NUM_CHANNELS);
    assert(num_channels <= TRANSFORM_MAX_CHANNELS);

    // Divide the buffer space evenly between the worker threads
    buffer_size /= decoder->worker_thread.pool.thread_count; // used to assume max of 4
    buffer += buffer_size * thread_index;

    // Round the buffer pointer up to the next cache line
    buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)buffer & _CACHE_LINE_MASK));
    bufptr = (char *)ALIGN(buffer, _CACHE_LINE_SIZE);

    // Allocate buffer space for the output rows from each channel
    for (channel = 0; channel < num_channels; channel++)
    {
        // Get the row width for this channel
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];
        int width = wavelet->width;
        int height = wavelet->height;
        //int pitch = wavelet->pitch;
        size_t channel_buffer_size;

        // Compute the width and pitch for the output rows stored in this buffer
        // (each wavelet row expands to two output rows of twice the width)
        int buffer_width = 2 * width;
        int buffer_height = 2;
        int buffer_pitch = ALIGN16(buffer_width);

        // Compute the total allocation for this channel
        channel_buffer_size = buffer_height * buffer_pitch;

        // Check that there is enough space available
        assert(channel_buffer_size <= buffer_size);

        // Allocate the buffer for this channel
        plane_array[channel] = (uint8_t *)bufptr;

        // Remember the pitch for rows in this channel
        plane_pitch[channel] = buffer_pitch;

        // Advance the buffer pointer past the allocated space for this channel
        bufptr += channel_buffer_size;

        // Reduce the amount of space remaining in the buffer
        buffer_size -= channel_buffer_size;

        // The dimensions of the output image are the same as the luma channel
        if (channel == 0)
        {
            strip.width = buffer_width;
            strip.height = buffer_height;
            last_row = height;

            //DAN20050606 Added to fix heights that are not divisible by 8.
            last_display_row = (info->height+1)/2; // DAN20090215 -- fix for odd display lines.
            odd_display_lines = info->height & 1;

            // Remember the width of the wavelet bands for luma
            luma_band_width = width;
        }

        // Save the bands per channel for routines that process all channels at once
        lowlow_band[channel] = wavelet->band[0];
        lowhigh_band[channel] = wavelet->band[1];
        highlow_band[channel] = wavelet->band[2];
        highhigh_band[channel] = wavelet->band[3];

        lowlow_pitch[channel] = wavelet->pitch;
        lowhigh_pitch[channel] = wavelet->pitch;
        highlow_pitch[channel] = wavelet->pitch;
        highhigh_pitch[channel] = wavelet->pitch;

        // Remember the width of the wavelet for this channel
        channel_width[channel] = width;
    }

    // Use the remaining buffer space for intermediate results
    buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)bufptr & _CACHE_LINE_MASK));
    buffer = (char *)ALIGN(bufptr, _CACHE_LINE_SIZE);

    // Determine the exclusive upper bound for middle rows; when the wavelet
    // row count matches the display row count, the last row is handled by
    // the special bottom-border filter instead.
    if (last_row == last_display_row)
    {
        last_line = half_height - 1;
    }
    else
    {
        last_line = half_height;
    }
    if(odd_display_lines)
        last_line++;

    if (thread_index == TRANSFORM_WORKER_TOP_THREAD)
    {
        // Process the first row
        row = 0;
        output_row_ptr = output_buffer;

#if (0 && DEBUG)
        if (logfile) {
            fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
        }
#endif
        // Process the first row using special border filters for the top row
        InvertSpatialTopRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
                                       lowhigh_band, lowhigh_pitch,
                                       highlow_band, highlow_pitch,
                                       highhigh_band, highhigh_pitch,
                                       output_row_ptr, output_pitch,
                                       output_width, info->format, info->colorspace,
                                       row, channel_width,
                                       (PIXEL *)buffer, buffer_size,
                                       precision,
                                       horizontal_filter_proc);
    }

    if (thread_index == TRANSFORM_WORKER_BOTTOM_THREAD || decoder->worker_thread.pool.thread_count == 1)
    {
        if(last_row == last_display_row) //DAN20071218 -- Added as old 1080 RAW files would crash
        {
            int pitch = output_pitch;

            // Process the last row
            row = last_row - 1;

            // 3D work: stacked-anamorphic stereo packs both eyes in one frame,
            // so each eye's effective pitch is half the frame pitch
            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
                    pitch >>= 1;

            // Begin filling the last output row with results
            output_row_ptr = output_buffer + row * 2 * pitch;

#if (0 && DEBUG)
            if (logfile) {
                fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
            }
#endif
            // Process the last row using special border filters for the bottom row
            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work TODO Fix
                    output_row_ptr -= output_pitch;

            InvertSpatialBottomRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
                                              lowhigh_band, lowhigh_pitch,
                                              highlow_band, highlow_pitch,
                                              highhigh_band, highhigh_pitch,
                                              output_row_ptr, output_pitch,
                                              output_width, info->format, info->colorspace,
                                              row, channel_width,
                                              (PIXEL *)buffer, buffer_size,
                                              precision, odd_display_lines,
                                              horizontal_filter_proc);
        }
    }

    // Loop until all of the middle rows have been processed
    for (;;)
    {
        int work_index;
        int row;    // NOTE(review): intentionally shadows the outer variable

        // Wait for one row from each channel to process
        error = PoolThreadWaitForWork(&decoder->worker_thread.pool, &work_index, thread_index);

        // Is there another row to process?
        if (error == THREAD_ERROR_OKAY)
        {
            int pitch = output_pitch;

            // Compute the next row to process from the work index
            // (work index zero corresponds to the first middle row)
            row = work_index + 1;

            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) // stacked
                    pitch >>= 1;

            // Compute the output row corresponding to this row index
            output_row_ptr = output_buffer + row * 2 * pitch;
        }
        else
        {
            // No more work to do
            return;
        }

        // Is the row inside the top and bottom border?
        if (0 < row && row < last_line)
        {
            int outputlines = 2;
#if (0 && DEBUG)
            if (logfile) {
                fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
            }
#endif
            // An odd display height means the final row pair emits one line
            if(odd_display_lines && row==last_line-1)
            {
                outputlines = 1;
            }

            // Process the middle row using the normal wavelet filters
            InvertSpatialMiddleRow16sToOutput(decoder, thread_index,
                                              lowlow_band, lowlow_pitch,
                                              lowhigh_band, lowhigh_pitch,
                                              highlow_band, highlow_pitch,
                                              highhigh_band, highhigh_pitch,
                                              output_row_ptr, output_pitch,
                                              output_width, info->format, info->colorspace,
                                              row, channel_width,
                                              (PIXEL *)buffer, buffer_size,
                                              precision,
                                              horizontal_filter_proc,
                                              outputlines);
        }
    }
}
#endif //_THREADED
/*!
    @brief Scan a sample bitstream for the tag/value tuplet with the given tag.

    Walks the stream one tag/value segment at a time, skipping over sized
    chunks, until the requested tag is found, a trailer tag is reached, the
    stream is exhausted, or an unknown tag causes an error.

    @param data      pointer to the start of the sample bitstream
    @param datasize  number of bitstream words available in data
    @param findtag   tag to search for
    @param retvalue  [out] receives the 16-bit value paired with findtag
    @return true if findtag was found and *retvalue was written; false otherwise
*/
bool GetTuplet(unsigned char *data, int datasize,
               unsigned short findtag, unsigned short *retvalue)
{
    bool ret = false;
    BITSTREAM myinput, *pinput;
    TAGVALUE segment;
    TAGWORD tag,value;
    int error = 0;
    //char t[100];

    InitBitstream(&myinput);
    myinput.lpCurrentWord = data;
    myinput.nWordsUsed = datasize;
    pinput = &myinput;

    do
    {
        bool optional = false;  // NOTE(review): set but never read in this routine
        int chunksize = 0;

        // Read the next tag value pair from the bitstream
        segment = GetSegment(pinput);
        tag = segment.tuple.tag;
        value = segment.tuple.value;

        // Is this an optional tag? (encoded as a negated tag)
        if (tag < 0)
        {
            tag = NEG(tag);
            optional = true;
        }

        // Decode the payload size implied by the tag class
        if(tag & 0x2000)
        {
            // Large chunk: low 8 bits of the tag extend the 16-bit value
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag&0xff)<<16);
        }
        else if(tag & 0x4000)
        {
            // Small chunk: size is the 16-bit value
            chunksize = value;
            chunksize &= 0xffff;
        }
        else if(tag == CODEC_TAG_INDEX)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else
        {
            chunksize = 0;
        }

        if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
        {
            int skip = 1;
            error = 0;

            if(tag == (int)findtag)
            {
                // Found the requested tag: report its value and stop scanning
                *retvalue = value;
                ret = true;
                break;
            }

            if((tag & 0xff00) == 0x2200) //sample size
            {
                chunksize = 0; // don't test against pinput->nWordsUsed, as we might have read only enough of the sample for the metadata.
                skip = 0;
            }
            if((tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                skip = 1;
            }
            if((tag & 0xff00) == 0x2100) //level
                skip = 0;

            if(chunksize)
            {
                // Abort on a corrupt or truncated chunk size
                if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
                {
                    break;
                }
                if(skip)
                {
                    // Skip over the chunk payload (chunksize is in 32-bit words)
                    //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
                    pinput->lpCurrentWord += chunksize*4;
                    pinput->nWordsUsed -= chunksize*4;
                }
            }
        }
        else
        {
            // Unknown sized tag: terminate the scan
            error = 1;
        }
    } while(tag != CODEC_TAG_GROUP_TRAILER &&
            tag != CODEC_TAG_FRAME_TRAILER &&
            pinput->nWordsUsed>0 && !error);

    return ret;
}
/*!
Copied from metadata.cpp in the cedoc common directory
*/
/*!
    @brief Variant of GetTuplet that also returns the bitstream address
    immediately after the matching tag/value pair.

    Mirrors the scan logic of GetTuplet (kept in sync by hand); differs
    in that it validates its arguments, initializes the bitstream inline,
    and returns the current read pointer instead of a boolean.

    @param data      pointer to the start of the sample bitstream (may be NULL)
    @param datasize  number of bitstream words available in data
    @param findtag   tag to search for
    @param retvalue  [out] receives the value paired with findtag
    @return bitstream position following the match, or NULL if not found
*/
uint8_t *GetTupletAddr(uint8_t *data,
                       int datasize,
                       uint16_t findtag,
                       int16_t *retvalue)
{
    unsigned char *ret = NULL;
    BITSTREAM myinput, *pinput;
    TAGVALUE segment;
    TAGWORD tag,value;
    int error = 0;

    // Guard against a missing or empty sample
    if (data == NULL || datasize == 0) {
        return NULL;
    }

    //InitBitstream(&myinput);
    memset(&myinput, 0, sizeof(BITSTREAM));
    myinput.lpCurrentWord = data;
    myinput.nWordsUsed = datasize;
    myinput.nBitsFree = BITSTREAM_LONG_SIZE;
    pinput = &myinput;

    do
    {
        //BOOL optional = FALSE;
        bool optional = false;  // NOTE(review): set but never read in this routine
        int chunksize = 0;

        // Read the next tag value pair from the bitstream
        segment = GetSegment(pinput);
        tag = segment.tuple.tag;
        value = segment.tuple.value;

        // Is this an optional tag? (encoded as a negated tag)
        if (tag < 0)
        {
            tag = NEG(tag);
            //optional = TRUE;
            optional = true;
        }

        // Decode the payload size implied by the tag class
        if(tag & 0x2000)
        {
            // Large chunk: low 8 bits of the tag extend the 16-bit value
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag&0xff)<<16);
        }
        else if(tag & 0x4000)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else if(tag == CODEC_TAG_INDEX)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else
        {
            chunksize = 0;
        }

        if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
        {
            int skip = 1;
            error = 0;

            if(tag == (int)findtag)
            {
                // Found the requested tag: return its value and current address
                *retvalue = value;
                ret = pinput->lpCurrentWord;
                break;
            }

            if((tag & 0xff00) == 0x2200) //sample size
            {
                chunksize = 0; // don't test against pinput->nWordsUsed, as we might have read only enough of the sample for the metadata.
                skip = 0;
            }
            if((tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                skip = 1;
            }
            if((tag & 0xff00) == 0x2100) //level
                skip = 0;

            if(chunksize)
            {
                // Abort on a corrupt or truncated chunk size
                if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
                {
                    break;
                }
                if(skip)
                {
                    // Skip over the chunk payload (chunksize is in 32-bit words)
                    //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
                    pinput->lpCurrentWord += chunksize*4;
                    pinput->nWordsUsed -= chunksize*4;
                }
            }
        }
        else
        {
            // Unknown sized tag: terminate the scan
            error = 1;
        }
    } while(tag != CODEC_TAG_GROUP_TRAILER &&
            tag != CODEC_TAG_FRAME_TRAILER &&
            pinput->nWordsUsed>0 && !error);

    return ret;
}
|
PersistenceDiagramsBarycenter.h | #ifndef _PERSISTENCEDIAGRAMSBARYCENTER_H
#define _PERSISTENCEDIAGRAMSBARYCENTER_H
#ifndef diagramTuple
#define diagramTuple std::tuple<ttk::SimplexId, ttk::CriticalType, ttk::SimplexId, \
ttk::CriticalType, dataType, ttk::SimplexId, \
dataType, float, float, float, dataType, float, float, float>
#endif
#ifndef BNodeType
#define BNodeType ttk::CriticalType
#define BLocalMax ttk::CriticalType::Local_maximum
#define BLocalMin ttk::CriticalType::Local_minimum
#define BSaddle1 ttk::CriticalType::Saddle1
#define BSaddle2 ttk::CriticalType::Saddle2
#define BIdVertex ttk::SimplexId
#endif
// base code includes
#include <PersistenceDiagramsBarycenter.cpp>
#include <Wrapper.h>
#include <PersistenceDiagram.h>
#include <Auction.h>
#include <KDTree.h>
#include <limits>
#include <PDBarycenter.h>
using namespace std;
using namespace ttk;
namespace ttk{
/**
 * Computes a Wasserstein barycenter of a set of persistence diagrams,
 * splitting the input pairs into min/saddle/max sub-diagrams and combining
 * the three sub-barycenters (see execute()).
 *
 * FIX: the constructor previously left method_, early_stoppage_,
 * points_added_ and points_deleted_ uninitialized; reading them before the
 * corresponding setter was called (as execute() does for method_ and
 * early_stoppage_) was undefined behavior (CERT EXP33-C).  They are now
 * given explicit defaults.
 */
template<typename dataType>
class PersistenceDiagramsBarycenter : public Debug{

  public:

    PersistenceDiagramsBarycenter(){
      wasserstein_ = 2;
      alpha_ = 1;
      lambda_ = 1;
      inputData_ = NULL;
      numberOfInputs_ = 0;
      threadNumber_ = 1;
      time_limit_ = 1;
      deterministic_ = 1;
      reinit_prices_ = 1;
      epsilon_decreases_ = 1;
      debugLevel_ = 1;
      use_progressive_ = 1;
      timings_plot_mode_ = false;
      // Previously uninitialized members (see class comment).
      // Defaults: auction method, early stoppage enabled — TODO confirm
      // against the caller's expectations.
      method_ = 0;
      early_stoppage_ = true;
      points_added_ = 0;
      points_deleted_ = 0;
    }

    ~PersistenceDiagramsBarycenter(){}

    // Computes the barycenter of the input diagrams; fills *barycenter and
    // returns, for each input diagram, the matchings to the barycenter.
    std::vector<std::vector<matchingTuple> >
      execute(std::vector<diagramTuple>* barycenter);

    // Sets the (type-erased) pointer to the vector of input diagrams.
    inline int setDiagrams(void *data){
      inputData_ = data;
      return 0;
    }

    inline int setNumberOfInputs(int numberOfInputs){
      numberOfInputs_ = numberOfInputs;
      return 0;
    }

    inline void setDebugLevel(const int debugLevel){
      debugLevel_ = debugLevel;
    }

    inline void setTimingsPlotMode(const bool timings_plot_mode){
      timings_plot_mode_ = timings_plot_mode;
    }

    inline void setDeterministic(const bool deterministic){
      deterministic_ = deterministic;
    }

    // "inf" selects the infinity Wasserstein distance (stored as -1).
    inline void setWasserstein(const std::string &wasserstein){
      wasserstein_ = (wasserstein == "inf") ? -1 : stoi(wasserstein);
    }

    inline void setThreadNumber(const int &ThreadNumber){
      threadNumber_ = ThreadNumber;
    }

    // Progressive mode requires decreasing epsilon.
    inline void setUseProgressive(const bool use_progressive){
      if(use_progressive)
        epsilon_decreases_ = true;
      use_progressive_ = use_progressive;
    }

    inline void setAlpha(const double alpha){
      alpha_ = alpha;
    }

    inline void setLambda(const double lambda){
      lambda_ = lambda;
    }

    inline void setTimeLimit(const double time_limit){
      time_limit_ = time_limit;
    }

    template<typename type>
    static type abs(const type var) {
      return (var >= 0) ? var : -var;
    }

    inline void setMethod(const int &method){
      method_ = method;
    }

    inline void setReinitPrices(const bool reinit_prices){
      reinit_prices_ = reinit_prices;
    }

    // Epsilon must keep decreasing while progressive mode is enabled.
    inline void setEpsilonDecreases(const bool epsilon_decreases){
      if(use_progressive_)
        epsilon_decreases_ = true;
      else
        epsilon_decreases_ = epsilon_decreases;
    }

    inline void setEarlyStoppage(const bool early_stoppage){
      early_stoppage_ = early_stoppage;
    }

  protected:
    int debugLevel_;
    bool deterministic_;
    bool timings_plot_mode_;
    int method_;
    int wasserstein_;
    int numberOfInputs_;
    void* inputData_;  //TODO : std::vector<void*>
    int threadNumber_;
    bool use_progressive_;
    double alpha_;
    double lambda_;
    double time_limit_;

    int points_added_;
    int points_deleted_;

    std::vector<std::vector<dataType>> all_matchings_;
    std::vector<std::vector<dataType>> all_old_matchings_;
    std::vector<BidderDiagram<dataType>> bidder_diagrams_;
    std::vector<GoodDiagram<dataType>> barycenter_goods_;

    bool reinit_prices_;
    bool epsilon_decreases_;
    bool early_stoppage_;
};
/**
 * Computes the barycenter of the input diagrams.
 *
 * The input pairs are partitioned into three sub-diagram families
 * (min-saddle, saddle-saddle, saddle-max); a PDBarycenter is run on each
 * family, the three sub-barycenters are concatenated into *barycenter, the
 * per-input matchings are re-indexed back to the original diagrams, and the
 * 3D coordinates of barycenter points are averaged from matched points.
 *
 * @param barycenter [out] receives the concatenated barycenter diagram
 * @return for each input diagram, the matchings (bidder index in the
 *         original diagram, barycenter index, cost)
 *
 * NOTE(review): this method divides the time_limit_ member in place, so a
 * second call to execute() runs with a reduced time budget.
 */
template <typename dataType>
std::vector<std::vector<matchingTuple>>
  PersistenceDiagramsBarycenter<dataType>::execute(
    std::vector<diagramTuple>* barycenter){
  Timer t;
  {
    // Input diagrams are stored type-erased in inputData_
    std::vector<std::vector<diagramTuple> > *intermediateDiagrams =
      (std::vector<std::vector<diagramTuple> > *) inputData_;

    std::vector<std::vector<diagramTuple> > data_min(numberOfInputs_);
    std::vector<std::vector<diagramTuple> > data_sad(numberOfInputs_);
    std::vector<std::vector<diagramTuple> > data_max(numberOfInputs_);

    // Original index of each pair inside its input diagram, per family
    std::vector<std::vector<int>> data_min_idx(numberOfInputs_);
    std::vector<std::vector<int>> data_sad_idx(numberOfInputs_);
    std::vector<std::vector<int>> data_max_idx(numberOfInputs_);

    std::vector<std::vector<matchingTuple>> all_matchings(numberOfInputs_);

    bool do_min = false;
    bool do_sad = false;
    bool do_max = false;

    // Create diagrams for min, saddle and max persistence pairs
    for(int i=0; i<numberOfInputs_; i++){
      std::vector<diagramTuple>* CTDiagram = &((*intermediateDiagrams)[i]);

      for(int j=0; j<(int) CTDiagram->size(); ++j){
        // NOTE(review): this t shadows the Timer t above
        diagramTuple t = CTDiagram->at(j);

        BNodeType nt1 = std::get<1>(t);
        BNodeType nt2 = std::get<3>(t);

        dataType dt = std::get<4>(t);
        //if (abs<dataType>(dt) < zeroThresh) continue;

        // Only pairs with positive persistence are considered
        if(dt>0){
          // The global min-max pair is treated as a max pair
          if (nt1 == BLocalMin && nt2 == BLocalMax) {
            data_max[i].push_back(t);
            data_max_idx[i].push_back(j);
            do_max = true;
          }
          else {
            if (nt1 == BLocalMax || nt2 == BLocalMax) {
              data_max[i].push_back(t);
              data_max_idx[i].push_back(j);
              do_max = true;
            }
            if (nt1 == BLocalMin || nt2 == BLocalMin) {
              data_min[i].push_back(t);
              data_min_idx[i].push_back(j);
              do_min = true;
            }
            if ((nt1 == BSaddle1 && nt2 == BSaddle2)
                || (nt1 == BSaddle2 && nt2 == BSaddle1)) {
              data_sad[i].push_back(t);
              data_sad_idx[i].push_back(j);
              do_sad = true;
            }
          }
        }
      }
    }

    std::vector<diagramTuple> barycenter_min;
    std::vector<diagramTuple> barycenter_sad;
    std::vector<diagramTuple> barycenter_max;

    std::vector<std::vector<matchingTuple>>
      matching_min, matching_sad, matching_max;

    dataType total_cost = 0;

    // Split the time budget between the sub-barycenter computations
    if(do_min && do_max){
      time_limit_ = time_limit_/2;
    }
    if(do_sad){
      time_limit_=time_limit_/3;
    }

    /*omp_set_num_threads(1);
    #ifdef TTK_ENABLE_OPENMP
    #pragma omp parallel sections
    #endif
    {
      #ifdef TTK_ENABLE_OPENMP
      #pragma omp section
      #endif
      {*/
    // Barycenter of the min-saddle pairs
    if(do_min){
      if(debugLevel_ > 2) {
        std::cout << "Computing Minima barycenter..." << std::endl;
      }
      PDBarycenter<dataType> bary_min = PDBarycenter<dataType>();
      bary_min.setThreadNumber(threadNumber_);
      bary_min.setWasserstein(wasserstein_);
      bary_min.setNumberOfInputs(numberOfInputs_);
      bary_min.setDiagramType(0);
      bary_min.setUseProgressive(use_progressive_);
      bary_min.setTimeLimit(time_limit_);
      bary_min.setGeometricalFactor(alpha_);
      bary_min.setDebugLevel(debugLevel_);
      bary_min.setDeterministic(deterministic_);
      bary_min.setTimingsPlotMode(timings_plot_mode_);
      bary_min.setLambda(lambda_);
      bary_min.setMethod(method_);
      bary_min.setEarlyStoppage(early_stoppage_);
      bary_min.setEpsilonDecreases(epsilon_decreases_);
      bary_min.setReinitPrices(reinit_prices_);
      bary_min.setDiagrams(&data_min);
      matching_min = bary_min.execute(barycenter_min);
      total_cost += bary_min.getCost();
    }
    /*}
    #ifdef TTK_ENABLE_OPENMP
    #pragma omp section
    #endif
    {*/
    // Barycenter of the saddle-saddle pairs
    if(do_sad){
      if(debugLevel_ > 2) {
        std::cout << "Computing Saddles barycenter..."<<std::endl;
      }
      PDBarycenter<dataType> bary_sad = PDBarycenter<dataType>();
      bary_sad.setThreadNumber(threadNumber_);
      bary_sad.setWasserstein(wasserstein_);
      bary_sad.setNumberOfInputs(numberOfInputs_);
      bary_sad.setDiagramType(1);
      bary_sad.setUseProgressive(use_progressive_);
      bary_sad.setTimeLimit(time_limit_);
      bary_sad.setGeometricalFactor(alpha_);
      bary_sad.setLambda(lambda_);
      bary_sad.setDebugLevel(debugLevel_);
      bary_sad.setMethod(method_);
      bary_sad.setEarlyStoppage(early_stoppage_);
      bary_sad.setEpsilonDecreases(epsilon_decreases_);
      bary_sad.setDeterministic(deterministic_);
      bary_sad.setReinitPrices(reinit_prices_);
      bary_sad.setDiagrams(&data_sad);
      matching_sad = bary_sad.execute(barycenter_sad);
      total_cost += bary_sad.getCost();
    }
    /*}
    #ifdef TTK_ENABLE_OPENMP
    #pragma omp section
    #endif
    {*/
    // Barycenter of the saddle-max pairs
    if(do_max){
      if(debugLevel_ > 2) {
        std::cout << "Computing Maxima barycenter..."<<std::endl;
      }
      PDBarycenter<dataType> bary_max = PDBarycenter<dataType>();
      bary_max.setThreadNumber(threadNumber_);
      bary_max.setWasserstein(wasserstein_);
      bary_max.setNumberOfInputs(numberOfInputs_);
      bary_max.setDiagramType(2);
      bary_max.setUseProgressive(use_progressive_);
      bary_max.setTimeLimit(time_limit_);
      bary_max.setGeometricalFactor(alpha_);
      bary_max.setLambda(lambda_);
      bary_max.setMethod(method_);
      bary_max.setDebugLevel(debugLevel_);
      bary_max.setEarlyStoppage(early_stoppage_);
      bary_max.setDeterministic(deterministic_);
      bary_max.setEpsilonDecreases(epsilon_decreases_);
      bary_max.setReinitPrices(reinit_prices_);
      bary_max.setDiagrams(&data_max);
      matching_max = bary_max.execute(barycenter_max);
      total_cost += bary_max.getCost();
    }
    //}
    //}

    // Reconstruct matchings: map bidder indices back to positions in the
    // original input diagrams, and offset barycenter indices so the three
    // sub-barycenters can be concatenated (min, then sad, then max)
    for(int i=0; i<numberOfInputs_; i++){
      if(do_min){
        for(unsigned int j=0; j<matching_min[i].size(); j++){
          matchingTuple t = matching_min[i][j];
          int bidder_id = std::get<0>(t);
          std::get<0>(t) = data_min_idx[i][bidder_id];
          all_matchings[i].push_back(t);
        }
      }
      if(do_sad){
        for(unsigned int j=0; j<matching_sad[i].size(); j++){
          matchingTuple t = matching_sad[i][j];
          int bidder_id = std::get<0>(t);
          std::get<0>(t) = data_sad_idx[i][bidder_id];
          std::get<1>(t) = std::get<1>(t) + barycenter_min.size();
          all_matchings[i].push_back(t);
        }
      }
      if(do_max){
        for(unsigned int j=0; j<matching_max[i].size(); j++){
          matchingTuple t = matching_max[i][j];
          int bidder_id = std::get<0>(t);
          std::get<0>(t) = data_max_idx[i][bidder_id];
          std::get<1>(t) = std::get<1>(t) + barycenter_min.size() + barycenter_sad.size();
          all_matchings[i].push_back(t);
        }
      }
    }

    // Reconstruct barycenter (concatenation order must match the matching
    // index offsets above)
    for(unsigned int j=0; j<barycenter_min.size(); j++){
      diagramTuple dt = barycenter_min[j];
      barycenter->push_back(dt);
    }
    for(unsigned int j=0; j<barycenter_sad.size(); j++){
      diagramTuple dt = barycenter_sad[j];
      barycenter->push_back(dt);
    }
    for(unsigned int j=0; j<barycenter_max.size(); j++){
      diagramTuple dt = barycenter_max[j];
      barycenter->push_back(dt);
    }

    // Recreate 3D critical coordinates of barycentric points by averaging
    // the coordinates of the input points matched to each barycenter point
    std::vector<int> number_of_matchings_for_point(barycenter->size());
    std::vector<float> cords_x1(barycenter->size());
    std::vector<float> cords_y1(barycenter->size());
    std::vector<float> cords_z1(barycenter->size());
    std::vector<float> cords_x2(barycenter->size());
    std::vector<float> cords_y2(barycenter->size());
    std::vector<float> cords_z2(barycenter->size());
    for(unsigned i=0; i<barycenter->size(); i++){
      number_of_matchings_for_point[i] = 0;
      cords_x1[i] = 0;
      cords_y1[i] = 0;
      cords_z1[i] = 0;
      cords_x2[i] = 0;
      cords_y2[i] = 0;
      cords_z2[i] = 0;
    }

    for(unsigned i=0; i<all_matchings.size(); i++){
      std::vector<diagramTuple>* CTDiagram = &((*intermediateDiagrams)[i]);
      for(unsigned j=0; j<all_matchings[i].size(); j++){
        matchingTuple t = all_matchings[i][j];
        int bidder_id = std::get<0>(t);
        int bary_id = std::get<1>(t);

        diagramTuple &bidder = CTDiagram->at(bidder_id);
        number_of_matchings_for_point[bary_id] +=1;
        // Tuple slots 7-9 / 11-13 hold the birth / death 3D coordinates
        cords_x1[bary_id] += std::get<7>(bidder);
        cords_y1[bary_id] += std::get<8>(bidder);
        cords_z1[bary_id] += std::get<9>(bidder);
        cords_x2[bary_id] += std::get<11>(bidder);
        cords_y2[bary_id] += std::get<12>(bidder);
        cords_z2[bary_id] += std::get<13>(bidder);
      }
    }

    for(unsigned i=0; i<barycenter->size(); i++){
      if(number_of_matchings_for_point[i]>0){
        std::get<7>(barycenter->at(i))  = cords_x1[i] / number_of_matchings_for_point[i];
        std::get<8>(barycenter->at(i))  = cords_y1[i] / number_of_matchings_for_point[i];
        std::get<9>(barycenter->at(i))  = cords_z1[i] / number_of_matchings_for_point[i];
        std::get<11>(barycenter->at(i)) = cords_x2[i] / number_of_matchings_for_point[i];
        std::get<12>(barycenter->at(i)) = cords_y2[i] / number_of_matchings_for_point[i];
        std::get<13>(barycenter->at(i)) = cords_z2[i] / number_of_matchings_for_point[i];
      }
    }

    // for(int i=0; i<numberOfInputs_; i++){
    //   delete data_min[i];
    //   delete data_sad[i];
    //   delete data_max[i];
    // }

    // Negative debug levels select special reporting modes
    if(debugLevel_==-1 || debugLevel_==-3){
      std::cout << "[PersistenceDiagramBarycenter] Total cost : " << total_cost<< std::endl;
    }
    // std::stringstream msg;
    if(debugLevel_==-2 || debugLevel_ ==-3){
      cout << "[PersistenceDiagramsBarycenter] processed in "
           << t.getElapsedTime() << " s. (" << threadNumber_
           << " thread(s))."
           << std::endl;
    }
    // dMsg(std::cout, msg.str(), timeMsg);
    return all_matchings;
  }
}
}
// if the package is a pure template class, uncomment the following line
#include <PDBarycenterImpl.h>
#endif
|
model.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_MODEL_MODEL_H__
#define PRINTEMPS_MODEL_MODEL_H__
namespace printemps {
namespace model {
/*****************************************************************************/
struct ModelConstant {
    /**
     * Since the addresses of created variables, expressions, and constraints
     * must not be reallocated, addresses for them are reserved beforehand, and
     * the number of their definitions cannot exceed the following limits.
     */
    static constexpr int MAX_NUMBER_OF_VARIABLE_PROXIES   = 100;
    static constexpr int MAX_NUMBER_OF_EXPRESSION_PROXIES = 100;
    static constexpr int MAX_NUMBER_OF_CONSTRAINT_PROXIES = 100;
};
/*****************************************************************************/
template <class T_Variable, class T_Expression>
class Model {
private:
std::string m_name;
std::vector<model_component::VariableProxy<T_Variable, T_Expression>>
m_variable_proxies;
std::vector<model_component::ExpressionProxy<T_Variable, T_Expression>>
m_expression_proxies;
std::vector<model_component::ConstraintProxy<T_Variable, T_Expression>>
m_constraint_proxies;
model_component::Objective<T_Variable, T_Expression> m_objective;
std::vector<std::string> m_variable_names;
std::vector<std::string> m_expression_names;
std::vector<std::string> m_constraint_names;
bool m_is_defined_objective;
bool m_is_enabled_fast_evaluation;
bool m_is_linear;
bool m_is_minimization;
bool m_is_solved;
bool m_is_feasible;
std::vector<model_component::Selection<T_Variable, T_Expression>>
m_selections;
model_component::VariableReference<T_Variable, T_Expression> //
m_variable_reference_original;
model_component::ConstraintReference<T_Variable, T_Expression> //
m_constraint_reference_original;
model_component::ConstraintTypeReference<T_Variable, T_Expression> //
m_constraint_type_reference_original;
model_component::VariableReference<T_Variable, T_Expression> //
m_variable_reference;
model_component::ConstraintReference<T_Variable, T_Expression> //
m_constraint_reference;
model_component::ConstraintTypeReference<T_Variable, T_Expression> //
m_constraint_type_reference;
neighborhood::Neighborhood<T_Variable, T_Expression> m_neighborhood;
std::function<void(option::Option *,
solution::IncumbentHolder<T_Variable, T_Expression> *)>
m_callback;
/*************************************************************************/
Model(const Model &) = default;
/*************************************************************************/
Model &operator=(const Model &) = default;
public:
/*************************************************************************/
//! Default constructor: builds an empty model in its initial state.
Model(void) {
    this->initialize();
}

/*************************************************************************/
//! Constructor that also assigns the model name.
Model(const std::string &a_NAME) {
    this->initialize();
    this->set_name(a_NAME);
}

/*************************************************************************/
virtual ~Model(void) {
    /// nothing to do
}

/*************************************************************************/
//! Resets every member to its default (empty) state.  Proxy storage is
//! reserved up front so that addresses of created variables, expressions,
//! and constraints are never reallocated (see ModelConstant).
void initialize(void) {
    m_name = "";
    m_variable_proxies.reserve(
        ModelConstant::MAX_NUMBER_OF_VARIABLE_PROXIES);
    m_expression_proxies.reserve(
        ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES);
    m_constraint_proxies.reserve(
        ModelConstant::MAX_NUMBER_OF_CONSTRAINT_PROXIES);
    m_objective.initialize();

    m_variable_names.clear();
    m_expression_names.clear();
    m_constraint_names.clear();

    m_is_defined_objective = false;
    m_is_enabled_fast_evaluation = true;
    m_is_linear = true;
    m_is_minimization = true;
    m_is_solved = false;
    m_is_feasible = false;

    m_selections.clear();
    m_variable_reference_original.initialize();
    m_constraint_reference_original.initialize();
    m_constraint_type_reference_original.initialize();
    m_variable_reference.initialize();
    m_constraint_reference.initialize();
    m_constraint_type_reference.initialize();

    m_neighborhood.initialize();
    // Default callback is a no-op.
    m_callback = [](option::Option *,
                    solution::IncumbentHolder<T_Variable, T_Expression> *) {
    };
}

/*************************************************************************/
//! Sets the model name.
inline constexpr void set_name(const std::string &a_NAME) {
    m_name = a_NAME;
}

/*************************************************************************/
//! Returns the model name.
inline constexpr const std::string &name(void) const {
    return m_name;
}
/*************************************************************************/
//! Creates a scalar decision variable with the given name and returns a
//! reference to its proxy.
//! Throws std::logic_error if the name contains spaces or the number of
//! variable proxies would exceed MAX_NUMBER_OF_VARIABLE_PROXIES (the limit
//! exists because proxy addresses must never be reallocated).
constexpr model_component::VariableProxy<T_Variable, T_Expression>
    &create_variable(const std::string &a_NAME) {
    if (utility::has_space(a_NAME)) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The name of decision variable must not contain spaces."));
    }

    int proxy_index = m_variable_proxies.size();
    if (proxy_index >= ModelConstant::MAX_NUMBER_OF_VARIABLE_PROXIES) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The number of decision variable definitions must be equal to "
            "or less than " +
                std::to_string(
                    ModelConstant::MAX_NUMBER_OF_VARIABLE_PROXIES) +
                "."));
    }

    m_variable_proxies.emplace_back(
        model_component::VariableProxy<
            T_Variable, T_Expression>::create_instance(proxy_index));
    m_variable_names.push_back(a_NAME);

    return m_variable_proxies.back();
}
/*************************************************************************/
//! Creates a scalar decision variable with the given name and sets its
//! lower and upper bounds before returning a reference to its proxy.
constexpr model_component::VariableProxy<T_Variable, T_Expression>
    &create_variable(const std::string &a_NAME,          //
                     const T_Variable   a_LOWER_BOUND,   //
                     const T_Variable   a_UPPER_BOUND) {
    // Delegate the creation and name checks to the single-argument overload,
    // then apply the requested bounds to the freshly created proxy.
    auto &proxy = this->create_variable(a_NAME);
    proxy.set_bound(a_LOWER_BOUND, a_UPPER_BOUND);
    return proxy;
}
/*************************************************************************/
//! Creates a one-dimensional array of decision variables with the given
//! name and number of elements, returning a reference to the proxy.
//! Throws std::logic_error on names with spaces or when the proxy limit
//! (MAX_NUMBER_OF_VARIABLE_PROXIES) would be exceeded.
constexpr model_component::VariableProxy<T_Variable, T_Expression>
    &create_variables(const std::string &a_NAME,  //
                      const int          a_NUMBER_OF_ELEMENTS) {
    if (utility::has_space(a_NAME)) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The name of decision variable must not contain spaces."));
    }

    int proxy_index = m_variable_proxies.size();
    if (proxy_index >= ModelConstant::MAX_NUMBER_OF_VARIABLE_PROXIES) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The number of decision variable definitions must be equal to "
            "or less than " +
                std::to_string(
                    ModelConstant::MAX_NUMBER_OF_VARIABLE_PROXIES) +
                "."));
    }

    m_variable_proxies.emplace_back(
        model_component::VariableProxy<T_Variable, T_Expression>::
            create_instance(proxy_index, a_NUMBER_OF_ELEMENTS));
    m_variable_names.push_back(a_NAME);

    return m_variable_proxies.back();
}

/*************************************************************************/
//! Creates a one-dimensional array of decision variables and sets the
//! same lower/upper bound on every element.
constexpr model_component::VariableProxy<T_Variable, T_Expression>
    &create_variables(const std::string &a_NAME,                //
                      const int          a_NUMBER_OF_ELEMENTS,  //
                      const T_Variable   a_LOWER_BOUND,         //
                      const T_Variable   a_UPPER_BOUND) {
    auto &variable_proxy = create_variables(a_NAME, a_NUMBER_OF_ELEMENTS);
    variable_proxy.set_bound(a_LOWER_BOUND, a_UPPER_BOUND);

    return m_variable_proxies.back();
}

/*************************************************************************/
//! Creates a multi-dimensional array of decision variables with the given
//! name and shape, returning a reference to the proxy.
//! Throws std::logic_error on names with spaces or when the proxy limit
//! (MAX_NUMBER_OF_VARIABLE_PROXIES) would be exceeded.
constexpr model_component::VariableProxy<T_Variable, T_Expression>
    &create_variables(const std::string &     a_NAME,  //
                      const std::vector<int> &a_SHAPE) {
    if (utility::has_space(a_NAME)) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The name of decision variable must not contain spaces."));
    }

    int proxy_index = m_variable_proxies.size();
    if (proxy_index >= ModelConstant::MAX_NUMBER_OF_VARIABLE_PROXIES) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The number of decision variable definitions must be equal to "
            "or less than " +
                std::to_string(
                    ModelConstant::MAX_NUMBER_OF_VARIABLE_PROXIES) +
                "."));
    }

    m_variable_proxies.emplace_back(
        model_component::VariableProxy<
            T_Variable, T_Expression>::create_instance(proxy_index,
                                                       a_SHAPE));
    m_variable_names.push_back(a_NAME);

    return m_variable_proxies.back();
}

/*************************************************************************/
//! Creates a multi-dimensional array of decision variables and sets the
//! same lower/upper bound on every element.
constexpr model_component::VariableProxy<T_Variable, T_Expression>
    &create_variables(const std::string &     a_NAME,         //
                      const std::vector<int> &a_SHAPE,        //
                      const T_Variable        a_LOWER_BOUND,  //
                      const T_Variable        a_UPPER_BOUND) {
    auto &variable_proxy = create_variables(a_NAME, a_SHAPE);
    variable_proxy.set_bound(a_LOWER_BOUND, a_UPPER_BOUND);

    return m_variable_proxies.back();
}
/*************************************************************************/
//! Creates a scalar expression with the given name and returns a reference
//! to its proxy.
//! Throws std::logic_error on names with spaces or when the proxy limit
//! (MAX_NUMBER_OF_EXPRESSION_PROXIES) would be exceeded (proxy addresses
//! must never be reallocated).
inline constexpr model_component::ExpressionProxy<T_Variable, T_Expression>
    &create_expression(const std::string &a_NAME) {
    if (utility::has_space(a_NAME)) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The name of expression must not contain spaces."));
    }

    int proxy_index = m_expression_proxies.size();
    if (proxy_index >= ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The number of expression definitions must be equal to or "
            "less than " +
                std::to_string(
                    ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) +
                "."));
    }

    m_expression_proxies.emplace_back(
        model_component::ExpressionProxy<
            T_Variable, T_Expression>::create_instance(proxy_index));
    m_expression_names.push_back(a_NAME);

    return m_expression_proxies.back();
}

/*************************************************************************/
//! Creates a one-dimensional array of expressions with the given name and
//! number of elements, returning a reference to the proxy.
//! Throws std::logic_error on names with spaces or when the proxy limit
//! (MAX_NUMBER_OF_EXPRESSION_PROXIES) would be exceeded.
constexpr model_component::ExpressionProxy<T_Variable, T_Expression>
    &create_expressions(const std::string &a_NAME,  //
                        int                a_NUMBER_OF_ELEMENTS) {
    if (utility::has_space(a_NAME)) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The name of expression must not contain spaces."));
    }

    int proxy_index = m_expression_proxies.size();
    if (proxy_index >= ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "The number of expression definitions must be equal to or "
            "less than " +
                std::to_string(
                    ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) +
                "."));
    }

    m_expression_proxies.emplace_back(
        model_component::ExpressionProxy<T_Variable, T_Expression>::
            create_instance(proxy_index, a_NUMBER_OF_ELEMENTS));
    m_expression_names.push_back(a_NAME);

    return m_expression_proxies.back();
}
/*************************************************************************/
constexpr model_component::ExpressionProxy<T_Variable, T_Expression>
&create_expressions(const std::string & a_NAME, //
const std::vector<int> &a_SHAPE) {
if (utility::has_space(a_NAME)) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The name of expression must not contain spaces."));
}
int proxy_index = m_expression_proxies.size();
if (proxy_index >= ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The number of expression definitions must be equal to or "
"less than " +
std::to_string(
ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) +
"."));
}
m_expression_proxies.emplace_back(
model_component::ExpressionProxy<
T_Variable, T_Expression>::create_instance(proxy_index,
a_SHAPE));
m_expression_names.push_back(a_NAME);
return m_expression_proxies.back();
}
/*************************************************************************/
template <template <class, class> class T_ExpressionLike>
constexpr model_component::ExpressionProxy<T_Variable, T_Expression> &
create_expression(
const std::string & a_NAME, //
const T_ExpressionLike<T_Variable, T_Expression> &a_EXPRESSION_LIKE) {
if (utility::has_space(a_NAME)) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The name of expression must not contain spaces."));
}
int proxy_index = m_expression_proxies.size();
if (proxy_index >= ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The number of expression definitions must be equal to or "
"less than " +
std::to_string(
ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) +
"."));
}
m_expression_proxies.emplace_back(
model_component::ExpressionProxy<
T_Variable, T_Expression>::create_instance(proxy_index));
m_expression_names.push_back(a_NAME);
m_expression_proxies.back() = a_EXPRESSION_LIKE.to_expression();
return m_expression_proxies.back();
}
/*************************************************************************/
constexpr model_component::ExpressionProxy<T_Variable, T_Expression>
&create_expression(
const std::string &a_NAME, //
const model_component::Expression<T_Variable, T_Expression>
&a_EXPRESSION) {
if (utility::has_space(a_NAME)) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The name of expression must not contain spaces."));
}
int proxy_index = m_expression_proxies.size();
if (proxy_index >= ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The number of expression definitions must be equal to or "
"less than " +
std::to_string(
ModelConstant::MAX_NUMBER_OF_EXPRESSION_PROXIES) +
"."));
}
m_expression_proxies.emplace_back(
model_component::ExpressionProxy<
T_Variable, T_Expression>::create_instance(proxy_index));
m_expression_names.push_back(a_NAME);
m_expression_proxies.back() = a_EXPRESSION;
return m_expression_proxies.back();
}
/*************************************************************************/
constexpr model_component::ConstraintProxy<T_Variable, T_Expression>
&create_constraint(const std::string &a_NAME) {
if (utility::has_space(a_NAME)) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The name of constraint must not contain spaces."));
}
int proxy_index = m_constraint_proxies.size();
if (proxy_index >= ModelConstant::MAX_NUMBER_OF_CONSTRAINT_PROXIES) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The number of constraint definitions must be equal to or "
"less than " +
std::to_string(
ModelConstant::MAX_NUMBER_OF_CONSTRAINT_PROXIES) +
"."));
}
m_constraint_proxies.emplace_back(
model_component::ConstraintProxy<
T_Variable, T_Expression>::create_instance(proxy_index));
m_constraint_names.push_back(a_NAME);
return m_constraint_proxies.back();
}
/*************************************************************************/
constexpr model_component::ConstraintProxy<T_Variable, T_Expression>
&create_constraints(const std::string &a_NAME, //
int a_NUMBER_OF_ELEMENTS) {
if (utility::has_space(a_NAME)) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The name of constraint must not contain spaces."));
}
int proxy_index = m_constraint_proxies.size();
if (proxy_index >= ModelConstant::MAX_NUMBER_OF_CONSTRAINT_PROXIES) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The number of constraint definitions must be equal to or "
"less than " +
std::to_string(
ModelConstant::MAX_NUMBER_OF_CONSTRAINT_PROXIES) +
"."));
}
m_constraint_proxies.emplace_back(
model_component::ConstraintProxy<T_Variable, T_Expression>::
create_instance(proxy_index, a_NUMBER_OF_ELEMENTS));
m_constraint_names.push_back(a_NAME);
return m_constraint_proxies.back();
}
/*************************************************************************/
constexpr model_component::ConstraintProxy<T_Variable, T_Expression>
&create_constraints(const std::string & a_NAME, //
const std::vector<int> &a_SHAPE) {
if (utility::has_space(a_NAME)) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The name of constraint must not contain spaces."));
}
int proxy_index = m_constraint_proxies.size();
if (proxy_index >= ModelConstant::MAX_NUMBER_OF_CONSTRAINT_PROXIES) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The number of constraint definitions must be equal to or "
"less than " +
std::to_string(
ModelConstant::MAX_NUMBER_OF_CONSTRAINT_PROXIES) +
"."));
}
m_constraint_proxies.emplace_back(
model_component::ConstraintProxy<
T_Variable, T_Expression>::create_instance(proxy_index,
a_SHAPE));
m_constraint_names.push_back(a_NAME);
return m_constraint_proxies.back();
}
/*************************************************************************/
constexpr model_component::ConstraintProxy<T_Variable, T_Expression>
&create_constraint(
const std::string &a_NAME, //
const model_component::Constraint<T_Variable, T_Expression>
&a_CONSTRAINT) {
if (utility::has_space(a_NAME)) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The name of constraint must not contain spaces."));
}
int proxy_index = m_constraint_proxies.size();
if (proxy_index >= ModelConstant::MAX_NUMBER_OF_CONSTRAINT_PROXIES) {
throw std::logic_error(utility::format_error_location(
__FILE__, __LINE__, __func__,
"The number of constraint definitions must be equal to or "
"less than " +
std::to_string(
ModelConstant::MAX_NUMBER_OF_CONSTRAINT_PROXIES) +
"."));
}
m_constraint_proxies.emplace_back(
model_component::ConstraintProxy<
T_Variable, T_Expression>::create_instance(proxy_index));
m_constraint_names.push_back(a_NAME);
m_constraint_proxies.back() = a_CONSTRAINT;
return m_constraint_proxies.back();
}
/*************************************************************************/
inline constexpr void minimize(
const std::function<
T_Expression(const neighborhood::Move<T_Variable, T_Expression> &)>
&a_FUNCTION) {
auto objective = model_component::Objective<
T_Variable, T_Expression>::create_instance(a_FUNCTION);
m_objective = objective;
m_is_defined_objective = true;
m_is_minimization = true;
}
/*************************************************************************/
template <template <class, class> class T_ExpressionLike>
inline constexpr void minimize(
const T_ExpressionLike<T_Variable, T_Expression> &a_EXPRESSION_LIKE) {
auto objective = model_component::Objective<T_Variable, T_Expression>::
create_instance(a_EXPRESSION_LIKE.to_expression());
m_objective = objective;
m_is_defined_objective = true;
m_is_minimization = true;
}
/*************************************************************************/
inline constexpr void minimize(
const model_component::Expression<T_Variable, T_Expression>
&a_EXPRESSION) {
auto objective = model_component::Objective<
T_Variable, T_Expression>::create_instance(a_EXPRESSION);
m_objective = objective;
m_is_defined_objective = true;
m_is_minimization = true;
}
/*************************************************************************/
inline constexpr void maximize(
const std::function<
T_Expression(const neighborhood::Move<T_Variable, T_Expression> &)>
&a_FUNCTION) {
auto objective = model_component::Objective<
T_Variable, T_Expression>::create_instance(a_FUNCTION);
m_objective = objective;
m_is_defined_objective = true;
m_is_minimization = false;
}
/*************************************************************************/
template <template <class, class> class T_ExpressionLike>
inline constexpr void maximize(
const T_ExpressionLike<T_Variable, T_Expression> &a_EXPRESSION_LIKE) {
auto objective = model_component::Objective<T_Variable, T_Expression>::
create_instance(a_EXPRESSION_LIKE.to_expression());
m_objective = objective;
m_is_defined_objective = true;
m_is_minimization = false;
}
/*************************************************************************/
inline constexpr void maximize(
const model_component::Expression<T_Variable, T_Expression>
&a_EXPRESSION) {
auto objective = model_component::Objective<
T_Variable, T_Expression>::create_instance(a_EXPRESSION);
m_objective = objective;
m_is_defined_objective = true;
m_is_minimization = false;
}
/*************************************************************************/
    /**
     * Build every derived data structure of the model before solving:
     * verification, naming, linearity detection, categorization, presolve,
     * selection extraction, neighborhood setup, initial-value correction,
     * GF(2) solving, and fixed-sensitivity caching. The call order below is
     * significant; categorization must be refreshed after each step that can
     * fix variables or disable constraints.
     */
    constexpr void setup(
        const bool a_IS_ENABLED_PRESOLVE, //
        const bool a_IS_ENABLED_INITIAL_VALUE_CORRECTION, //
        const bool a_IS_ENABLED_AGGREGATION_MOVE, //
        const bool a_IS_ENABLED_PRECEDENCE_MOVE, //
        const bool a_IS_ENABLED_VARIABLE_BOUND_MOVE, //
        const bool a_IS_ENABLED_USER_DEFINED_MOVE, //
        const bool a_IS_ENABLED_CHAIN_MOVE, //
        const option::selection_mode::SelectionMode &a_SELECTION_MODE, //
        const bool a_IS_ENABLED_PRINT) {
        verifier::verify_problem(this, a_IS_ENABLED_PRINT);
        /**
         * Determine unique names of decision variables and constraints.
         */
        this->setup_unique_name();
        /**
         * Determine the linearity.
         */
        this->setup_is_linear();
        /**
         * Determine if the fast evaluation can be enabled.
         */
        this->setup_is_enabled_fast_evaluation();
        /**
         * Initial categorization.
         */
        this->categorize_variables();
        this->categorize_constraints();
        this->setup_variable_related_zero_one_coefficient_constraints();
        this->setup_variable_related_constraints();
        this->setup_variable_sensitivity();
        /**
         * Store original categorization results. The final categorization would
         * be changed by presolving, extracting/eliminating intermediate
         * variables, and extracting selection constraints.
         */
        m_variable_reference_original = m_variable_reference;
        m_constraint_reference_original = m_constraint_reference;
        m_constraint_type_reference_original = m_constraint_type_reference;
        /**
         * Presolve the problem by removing redundant constraints and fixing
         * decision variables implicitly fixed.
         */
        if (a_IS_ENABLED_PRESOLVE) {
            presolver::reduce_problem_size(this, true, a_IS_ENABLED_PRINT);
        }
        /**
         * Extract and eliminate the intermediate variables. Each pass
         * re-categorizes first because the previous pass may have modified
         * the model; the loops stop when a pass makes no further changes.
         */
        if (a_IS_ENABLED_PRESOLVE && m_is_linear &&
            m_constraint_type_reference.intermediate_ptrs.size() > 0) {
            while (true) {
                this->categorize_variables();
                this->categorize_constraints();
                this->setup_variable_related_zero_one_coefficient_constraints();
                this->setup_variable_related_constraints();
                this->setup_variable_sensitivity();
                if (presolver::extract_dependent_intermediate_variables(
                        this, //
                        a_IS_ENABLED_PRINT) == 0) {
                    break;
                }
                while (true) {
                    this->categorize_variables();
                    this->categorize_constraints();
                    this->setup_variable_related_zero_one_coefficient_constraints();
                    this->setup_variable_related_constraints();
                    this->setup_variable_sensitivity();
                    if (presolver::eliminate_dependent_intermediate_variables(
                            this, //
                            a_IS_ENABLED_PRINT) == 0) {
                        break;
                    }
                }
                presolver::reduce_problem_size(this, false, a_IS_ENABLED_PRINT);
            }
        }
        /**
         * Extract selection constraints. If the number of constraints is bigger
         * than that of decision variables, this process will be skipped because
         * it would affect computational efficiency.
         */
        if (a_SELECTION_MODE != option::selection_mode::None &&
            this->number_of_variables() > this->number_of_constraints()) {
            presolver::extract_selections(this, //
                                          a_SELECTION_MODE, //
                                          a_IS_ENABLED_PRINT);
        }
        /**
         * Final categorization.
         */
        this->categorize_variables();
        this->categorize_constraints();
        this->setup_variable_related_zero_one_coefficient_constraints();
        this->setup_variable_related_constraints();
        this->setup_variable_sensitivity();
        /**
         * Setup the neighborhood generators.
         */
        this->setup_neighborhood(a_IS_ENABLED_AGGREGATION_MOVE, //
                                 a_IS_ENABLED_PRECEDENCE_MOVE, //
                                 a_IS_ENABLED_VARIABLE_BOUND_MOVE, //
                                 a_IS_ENABLED_USER_DEFINED_MOVE, //
                                 a_IS_ENABLED_CHAIN_MOVE, //
                                 a_IS_ENABLED_PRINT);
        /**
         * Verify and correct the initial values.
         */
        verifier::verify_and_correct_selection_variables_initial_values( //
            this, //
            a_IS_ENABLED_INITIAL_VALUE_CORRECTION, //
            a_IS_ENABLED_PRINT);
        verifier::verify_and_correct_binary_variables_initial_values(
            this, //
            a_IS_ENABLED_INITIAL_VALUE_CORRECTION, //
            a_IS_ENABLED_PRINT);
        verifier::verify_and_correct_integer_variables_initial_values(
            this, //
            a_IS_ENABLED_INITIAL_VALUE_CORRECTION, //
            a_IS_ENABLED_PRINT);
        /**
         * Solve GF(2) equations if needed.
         */
        if (a_IS_ENABLED_PRESOLVE &&
            m_constraint_type_reference.gf2_ptrs.size() > 0) {
            auto is_solved = presolver::solve_gf2(this, a_IS_ENABLED_PRINT);
            /**
             * Update fixed decision variables.
             */
            if (is_solved) {
                this->categorize_variables();
            }
        }
        /**
         * Setup the fixed sensitivities for fast evaluation.
         */
        this->setup_fixed_sensitivities(a_IS_ENABLED_PRINT);
        for (auto &&proxy : m_constraint_proxies) {
            for (auto &&constraint : proxy.flat_indexed_constraints()) {
                constraint.expression().setup_mask();
            }
        }
    }
/*************************************************************************/
constexpr void setup_unique_name(void) {
const int VARIABLE_PROXIES_SIZE = m_variable_proxies.size();
const int EXPRESSION_PROXIES_SIZE = m_expression_proxies.size();
const int CONSTRAINT_PROXIES_SIZE = m_constraint_proxies.size();
for (auto i = 0; i < VARIABLE_PROXIES_SIZE; i++) {
int number_of_elements = m_variable_proxies[i].number_of_elements();
for (auto j = 0; j < number_of_elements; j++) {
auto &variable =
m_variable_proxies[i].flat_indexed_variables(j);
if (variable.name() == "") {
variable.set_name(m_variable_names[i] +
m_variable_proxies[i].indices_label(j));
}
}
}
/// Expression
for (auto i = 0; i < EXPRESSION_PROXIES_SIZE; i++) {
int number_of_elements =
m_expression_proxies[i].number_of_elements();
for (auto j = 0; j < number_of_elements; j++) {
auto &expression =
m_expression_proxies[i].flat_indexed_expressions(j);
if (expression.name() == "") {
expression.set_name(
m_expression_names[i] +
m_expression_proxies[i].indices_label(j));
}
}
}
/// Constraint
for (auto i = 0; i < CONSTRAINT_PROXIES_SIZE; i++) {
int number_of_elements =
m_constraint_proxies[i].number_of_elements();
for (auto j = 0; j < number_of_elements; j++) {
auto &constraint =
m_constraint_proxies[i].flat_indexed_constraints(j);
if (constraint.name() == "") {
constraint.set_name(
m_constraint_names[i] +
m_constraint_proxies[i].indices_label(j));
}
}
}
}
/*************************************************************************/
constexpr void setup_is_linear(void) {
m_is_linear = true;
for (auto &&proxy : m_constraint_proxies) {
for (auto &&constraint : proxy.flat_indexed_constraints()) {
if (!constraint.is_linear()) {
m_is_linear = false;
}
}
}
if (!m_objective.is_linear()) {
m_is_linear = false;
}
}
/*************************************************************************/
constexpr void setup_is_enabled_fast_evaluation(void) {
m_is_enabled_fast_evaluation = true;
for (auto &&proxy : m_constraint_proxies) {
for (auto &&constraint : proxy.flat_indexed_constraints()) {
if (!constraint.is_linear()) {
m_is_enabled_fast_evaluation = false;
}
}
}
if (m_neighborhood.user_defined().is_enabled()) {
m_is_enabled_fast_evaluation = false;
}
}
/*************************************************************************/
constexpr void setup_variable_related_constraints(void) {
for (auto &&proxy : m_variable_proxies) {
for (auto &&variable : proxy.flat_indexed_variables()) {
variable.reset_related_constraint_ptrs();
}
}
for (auto &&proxy : m_constraint_proxies) {
for (auto &&constraint : proxy.flat_indexed_constraints()) {
for (auto &&sensitivity :
constraint.expression().sensitivities()) {
sensitivity.first->register_related_constraint_ptr(
&constraint);
}
}
}
}
/*************************************************************************/
    /**
     * Rebuild the per-variable sensitivity caches. Order matters: reset all
     * constraint sensitivities, re-register them from every constraint's
     * expression, derive each variable's uniform sensitivity, and finally
     * record each variable's objective coefficient.
     */
    constexpr void setup_variable_sensitivity(void) {
        /// Clear stale constraint sensitivities before re-registering.
        for (auto &&proxy : m_variable_proxies) {
            for (auto &&variable : proxy.flat_indexed_variables()) {
                variable.reset_constraint_sensitivities();
            }
        }
        /// Register each (constraint, coefficient) pair with its variable.
        for (auto &&proxy : m_constraint_proxies) {
            for (auto &&constraint : proxy.flat_indexed_constraints()) {
                for (auto &&sensitivity :
                     constraint.expression().sensitivities()) {
                    sensitivity.first->register_constraint_sensitivity(
                        &constraint, sensitivity.second);
                }
            }
        }
        /// Must run after registration so all sensitivities are present.
        for (auto &&proxy : m_variable_proxies) {
            for (auto &&variable : proxy.flat_indexed_variables()) {
                variable.setup_uniform_sensitivity();
            }
        }
        /// Record each variable's coefficient in the objective expression.
        for (auto &&sensitivity : m_objective.expression().sensitivities()) {
            sensitivity.first->set_objective_sensitivity(sensitivity.second);
        }
    }
/*************************************************************************/
constexpr void setup_variable_related_zero_one_coefficient_constraints(
void) {
for (auto &&proxy : m_variable_proxies) {
for (auto &&variable : proxy.flat_indexed_variables()) {
variable.reset_related_zero_one_coefficient_constraint_ptrs();
variable.setup_related_zero_one_coefficient_constraint_ptrs();
}
}
}
/*************************************************************************/
constexpr void categorize_variables(void) {
model_component::VariableReference<T_Variable, T_Expression>
variable_reference;
for (auto &&proxy : m_variable_proxies) {
for (auto &&variable : proxy.flat_indexed_variables()) {
variable_reference.variable_ptrs.push_back(&variable);
if (variable.is_fixed()) {
variable_reference.fixed_variable_ptrs.push_back(&variable);
} else {
variable_reference.mutable_variable_ptrs.push_back(
&variable);
}
if (variable.sense() ==
model_component::VariableSense::Binary) {
variable_reference.binary_variable_ptrs.push_back(
&variable);
}
if (variable.sense() ==
model_component::VariableSense::Integer) {
variable_reference.integer_variable_ptrs.push_back(
&variable);
}
if (variable.sense() ==
model_component::VariableSense::Selection) {
variable_reference.selection_variable_ptrs.push_back(
&variable);
}
if (variable.sense() ==
model_component::VariableSense::Intermediate) {
variable_reference.intermediate_variable_ptrs.push_back(
&variable);
}
}
}
m_variable_reference = variable_reference;
}
/*************************************************************************/
    /**
     * Rebuild m_constraint_reference (enabled/disabled partition) and
     * m_constraint_type_reference (structural classification). Type checks
     * are NOT mutually exclusive: a linear constraint may belong to several
     * type categories at once, so every is_*() predicate is tested
     * independently.
     */
    constexpr void categorize_constraints(void) {
        model_component::ConstraintReference<T_Variable, T_Expression>
            constraint_reference;
        model_component::ConstraintTypeReference<T_Variable, T_Expression>
            constraint_type_reference;
        /// Refresh each constraint's cached type flags before classifying.
        for (auto &&proxy : m_constraint_proxies) {
            for (auto &&constraint : proxy.flat_indexed_constraints()) {
                constraint.setup_constraint_type();
            }
        }
        for (auto &&proxy : m_constraint_proxies) {
            for (auto &&constraint : proxy.flat_indexed_constraints()) {
                constraint_reference.constraint_ptrs.push_back(&constraint);
                if (constraint.is_enabled()) {
                    constraint_reference.enabled_constraint_ptrs.push_back(
                        &constraint);
                } else {
                    constraint_reference.disabled_constraint_ptrs.push_back(
                        &constraint);
                }
                /// Nonlinear constraints get no further classification.
                if (!constraint.is_linear()) {
                    constraint_type_reference.nonlinear_ptrs.push_back(
                        &constraint);
                } else {
                    if (constraint.is_singleton()) {
                        constraint_type_reference.singleton_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_aggregation()) {
                        constraint_type_reference.aggregation_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_precedence()) {
                        constraint_type_reference.precedence_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_variable_bound()) {
                        constraint_type_reference.variable_bound_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_set_partitioning()) {
                        constraint_type_reference.set_partitioning_ptrs
                            .push_back(&constraint);
                    }
                    if (constraint.is_set_packing()) {
                        constraint_type_reference.set_packing_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_set_covering()) {
                        constraint_type_reference.set_covering_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_cardinality()) {
                        constraint_type_reference.cardinality_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_invariant_knapsack()) {
                        constraint_type_reference.invariant_knapsack_ptrs
                            .push_back(&constraint);
                    }
                    if (constraint.is_equation_knapsack()) {
                        constraint_type_reference.equation_knapsack_ptrs
                            .push_back(&constraint);
                    }
                    if (constraint.is_bin_packing()) {
                        constraint_type_reference.bin_packing_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_knapsack()) {
                        constraint_type_reference.knapsack_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_integer_knapsack()) {
                        constraint_type_reference.integer_knapsack_ptrs
                            .push_back(&constraint);
                    }
                    if (constraint.is_min_max()) {
                        constraint_type_reference.min_max_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_max_min()) {
                        constraint_type_reference.max_min_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_intermediate()) {
                        constraint_type_reference.intermediate_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_gf2()) {
                        constraint_type_reference.gf2_ptrs.push_back(
                            &constraint);
                    }
                    if (constraint.is_general_linear()) {
                        constraint_type_reference.general_linear_ptrs.push_back(
                            &constraint);
                    }
                }
            }
        }
        m_constraint_reference = constraint_reference;
        m_constraint_type_reference = constraint_type_reference;
    }
/*************************************************************************/
    /**
     * Initialize the neighborhood-move generators from the current variable
     * and constraint categorization. The binary/integer/selection moves are
     * always enabled; the remaining move kinds are set up only when their
     * corresponding flag is true. Must run after the final categorization.
     */
    constexpr void setup_neighborhood(
        const bool a_IS_ENABLED_AGGREGATION_MOVE, //
        const bool a_IS_ENABLED_PRECEDENCE_MOVE, //
        const bool a_IS_ENABLED_VARIABLE_BOUND_MOVE, //
        const bool a_IS_ENABLED_USER_DEFINED_MOVE, //
        const bool a_IS_ENABLED_CHAIN_MOVE, //
        const bool a_IS_ENABLED_PRINT) {
        utility::print_single_line(a_IS_ENABLED_PRINT);
        utility::print_message("Detecting the neighborhood structure...",
                               a_IS_ENABLED_PRINT);
        m_neighborhood.binary().setup(
            m_variable_reference.binary_variable_ptrs);
        m_neighborhood.integer().setup(
            m_variable_reference.integer_variable_ptrs);
        m_neighborhood.selection().setup(
            m_variable_reference.selection_variable_ptrs);
        if (a_IS_ENABLED_AGGREGATION_MOVE) {
            m_neighborhood.aggregation().setup(
                m_constraint_type_reference.aggregation_ptrs);
        }
        if (a_IS_ENABLED_PRECEDENCE_MOVE) {
            m_neighborhood.precedence().setup(
                m_constraint_type_reference.precedence_ptrs);
        }
        if (a_IS_ENABLED_VARIABLE_BOUND_MOVE) {
            m_neighborhood.variable_bound().setup(
                m_constraint_type_reference.variable_bound_ptrs);
        }
        if (a_IS_ENABLED_CHAIN_MOVE) {
            m_neighborhood.chain().setup();
        }
        if (a_IS_ENABLED_USER_DEFINED_MOVE) {
            m_neighborhood.user_defined().setup();
        }
        utility::print_message("Done.", a_IS_ENABLED_PRINT);
    }
/*************************************************************************/
    /**
     * Precompute the fixed sensitivity matrices of all expressions (those in
     * expression proxies, those inside constraints, and the objective's) for
     * fast incremental evaluation.
     */
    constexpr void setup_fixed_sensitivities(const bool a_IS_ENABLED_PRINT) {
        utility::print_single_line(a_IS_ENABLED_PRINT);
        utility::print_message("Creating the sensitivity matrix...",
                               a_IS_ENABLED_PRINT);
        for (auto &&proxy : m_expression_proxies) {
            for (auto &&expression : proxy.flat_indexed_expressions()) {
                expression.setup_fixed_sensitivities();
            }
        }
        for (auto &&proxy : m_constraint_proxies) {
            for (auto &&constraint : proxy.flat_indexed_constraints()) {
                constraint.expression().setup_fixed_sensitivities();
            }
        }
        m_objective.expression().setup_fixed_sensitivities();
        /**
         * The fixed sensitivities for the constraints and the objective are
         * built in their own setup() methods.
         */
        utility::print_message("Done.", a_IS_ENABLED_PRINT);
    }
/*************************************************************************/
constexpr void set_selections(
const std::vector<model_component::Selection<T_Variable, T_Expression>>
&a_SELECTIONS) {
m_selections = a_SELECTIONS;
for (auto &&selection : m_selections) {
for (auto &&variable_ptr : selection.variable_ptrs) {
/**
* Register the selection object to the variables which is
* covered by the corresponding selection constraint, and
* categorize the variable into "Selection".
*/
variable_ptr->set_selection_ptr(&selection);
}
}
}
/*************************************************************************/
constexpr void print_number_of_variables(void) const {
utility::print_single_line(true);
const auto &original = m_variable_reference_original;
const auto &presolved = m_variable_reference;
auto compute_number_of_variables = [](const auto &a_VARIABLE_PTRS) {
return a_VARIABLE_PTRS.size();
};
auto compute_number_of_mutable_variables =
[](const auto &a_VARIABLE_PTRS) {
return std::count_if(a_VARIABLE_PTRS.begin(),
a_VARIABLE_PTRS.end(),
[](const auto *a_VARIABLE_PTR) {
return !a_VARIABLE_PTR->is_fixed();
});
};
utility::print_info( //
"The number of decision variables: " +
utility::to_string( //
compute_number_of_variables( //
original.variable_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_mutable_variables( //
presolved.variable_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Binary: " +
utility::to_string( //
compute_number_of_variables( //
original.binary_variable_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_mutable_variables( //
presolved.binary_variable_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Integer: " +
utility::to_string( //
compute_number_of_variables( //
original.integer_variable_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_mutable_variables(
presolved.integer_variable_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Selection: " +
utility::to_string( //
compute_number_of_variables(
original.selection_variable_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_mutable_variables(
presolved.selection_variable_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Dependent Intermediate: " +
utility::to_string( //
compute_number_of_variables(
original.intermediate_variable_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_mutable_variables(
presolved.intermediate_variable_ptrs),
"%d") +
")",
true);
utility::print( //
" ( ) : Number of mutable variables after presolve.",
true);
}
/*************************************************************************/
constexpr void print_number_of_constraints(void) const {
utility::print_single_line(true);
auto compute_number_of_constraints = [](const auto &a_CONSTRAINT_PTRS) {
return a_CONSTRAINT_PTRS.size();
};
auto compute_number_of_enabled_constraints =
[](const auto &a_CONSTRAINT_PTRS) {
return std::count_if(a_CONSTRAINT_PTRS.begin(),
a_CONSTRAINT_PTRS.end(),
[](const auto *a_CONSTRAINT_PTR) {
return a_CONSTRAINT_PTR->is_enabled();
});
};
{
const auto &original = m_constraint_reference_original;
const auto &presolved = m_constraint_reference;
utility::print_info( //
"The number of constraints: " +
utility::to_string( //
compute_number_of_constraints( //
original.constraint_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.constraint_ptrs),
"%d") +
")",
true);
}
{
const auto &original = m_constraint_type_reference_original;
const auto &presolved = m_constraint_type_reference;
utility::print_info( //
" -- Singleton: " + //
utility::to_string( //
compute_number_of_constraints( //
original.singleton_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.singleton_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Aggregation: " + //
utility::to_string( //
compute_number_of_constraints( //
original.aggregation_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.aggregation_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Precedence: " + //
utility::to_string( //
compute_number_of_constraints( //
original.precedence_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.precedence_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Variable Bound: " + //
utility::to_string( //
compute_number_of_constraints( //
original.variable_bound_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.variable_bound_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Set Partitioning: " + //
utility::to_string( //
compute_number_of_constraints( //
original.set_partitioning_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.set_partitioning_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Set Packing: " + //
utility::to_string( //
compute_number_of_constraints( //
original.set_packing_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.set_packing_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Set Covering: " + //
utility::to_string( //
compute_number_of_constraints( //
original.set_covering_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.set_covering_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Cardinality: " + //
utility::to_string( //
compute_number_of_constraints( //
original.cardinality_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.cardinality_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Invariant Knapsack: " + //
utility::to_string( //
compute_number_of_constraints( //
original.invariant_knapsack_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.invariant_knapsack_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Equation Knapsack: " + //
utility::to_string( //
compute_number_of_constraints( //
original.equation_knapsack_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.equation_knapsack_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Bin Packing: " + //
utility::to_string( //
compute_number_of_constraints( //
original.bin_packing_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.bin_packing_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Knapsack: " + //
utility::to_string( //
compute_number_of_constraints( //
original.knapsack_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.knapsack_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Integer Knapsack: " + //
utility::to_string( //
compute_number_of_constraints( //
original.integer_knapsack_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.integer_knapsack_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Min-Max: " + //
utility::to_string( //
compute_number_of_constraints( //
original.min_max_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.min_max_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Max-Min: " + //
utility::to_string( //
compute_number_of_constraints( //
original.max_min_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.max_min_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Intermediate: " + //
utility::to_string( //
compute_number_of_constraints( //
original.intermediate_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.intermediate_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- GF(2): " + //
utility::to_string( //
compute_number_of_constraints( //
original.gf2_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.gf2_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- General Linear: " + //
utility::to_string( //
compute_number_of_constraints( //
original.general_linear_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.general_linear_ptrs),
"%d") +
")",
true);
utility::print_info( //
" -- Nonlinear: " + //
utility::to_string( //
compute_number_of_constraints( //
original.nonlinear_ptrs),
"%d") +
" (" +
utility::to_string( //
compute_number_of_enabled_constraints( //
presolved.nonlinear_ptrs),
"%d") +
")",
true);
}
utility::print( //
" ( ) : Number of enabled constraints after presolve.",
true);
}
/*************************************************************************/
/**
 * Register a user-defined callback. The callable is copied into
 * m_callback and later invoked through callback().
 * @param a_CALLBACK callable receiving the option set and the incumbent
 * holder (both passed by pointer when invoked).
 */
inline constexpr void set_callback(
    const std::function<
        void(option::Option *,
             solution::IncumbentHolder<T_Variable, T_Expression> *)>
        &a_CALLBACK) {
    m_callback = a_CALLBACK;
}
/*************************************************************************/
/**
 * Invoke the registered user-defined callback with the given option set
 * and incumbent holder.
 * NOTE(review): if no callback was registered, m_callback may be an
 * empty std::function — calling it would throw std::bad_function_call;
 * confirm callers guarantee prior registration.
 */
inline constexpr void callback(
    option::Option *a_option_ptr,
    solution::IncumbentHolder<T_Variable, T_Expression>
        *a_incumbent_holder_ptr) {
    m_callback(a_option_ptr, a_incumbent_holder_ptr);
}
/*************************************************************************/
/**
 * Import variable values from the given value proxies, then verify and
 * (if needed) correct the initial values of selection, binary, and
 * integer variables. Fixed variables keep their current values
 * (set_value_if_mutable skips them).
 * @param a_PROXIES value proxies indexed by each variable's proxy index.
 */
constexpr void import_variable_values(
    const std::vector<multi_array::ValueProxy<T_Variable>> &a_PROXIES) {
    for (auto &&variable_proxy : m_variable_proxies) {
        for (auto &&variable : variable_proxy.flat_indexed_variables()) {
            const auto &source_proxy = a_PROXIES[variable.proxy_index()];
            variable.set_value_if_mutable(
                source_proxy.flat_indexed_values(variable.flat_index()));
        }
    }
    verifier::verify_and_correct_selection_variables_initial_values(  //
        this, false, false);
    verifier::verify_and_correct_binary_variables_initial_values(  //
        this, false, false);
    verifier::verify_and_correct_integer_variables_initial_values(  //
        this, false, false);
}
/*************************************************************************/
constexpr void update(void) {
/**
* Update in order of expressions -> objective, constraints.
*/
for (auto &&proxy : m_expression_proxies) {
for (auto &&expression : proxy.flat_indexed_expressions()) {
expression.update();
}
}
for (auto &&proxy : m_constraint_proxies) {
for (auto &&constraint : proxy.flat_indexed_constraints()) {
constraint.update();
}
}
if (m_is_defined_objective) {
m_objective.update();
}
for (auto &&variable_ptr :
m_variable_reference.intermediate_variable_ptrs) {
variable_ptr->update_as_intermediate_variable();
variable_ptr->dependent_constraint_ptr()->update();
}
this->update_feasibility();
}
/*************************************************************************/
/**
 * Apply the given move and differentially update the model state.
 * @param a_MOVE move to commit (alterations and related constraints).
 */
constexpr void update(
    const neighborhood::Move<T_Variable, T_Expression> &a_MOVE) {
    /**
     * Update in order of objective, constraints -> expressions ->
     * variables.
     */
    if (m_is_defined_objective) {
        m_objective.update(a_MOVE);
    }
    /// For user-defined neighborhoods every enabled constraint is
    /// updated — presumably because related_constraint_ptrs cannot be
    /// relied on for such moves (TODO confirm).
    if (m_neighborhood.user_defined().is_enabled()) {
        for (auto &&proxy : m_constraint_proxies) {
            for (auto &&constraint : proxy.flat_indexed_constraints()) {
                if (constraint.is_enabled()) {
                    constraint.update(a_MOVE);
                }
            }
        }
    } else {
        for (auto &&constraint_ptr : a_MOVE.related_constraint_ptrs) {
            constraint_ptr->update(a_MOVE);
        }
    }
    for (auto &&proxy : m_expression_proxies) {
        for (auto &&expression : proxy.flat_indexed_expressions()) {
            if (expression.is_enabled()) {
                expression.update(a_MOVE);
            }
        }
    }
    /// Commit the new variable values (fixed variables are skipped by
    /// set_value_if_mutable).
    for (auto &&alteration : a_MOVE.alterations) {
        alteration.first->set_value_if_mutable(alteration.second);
    }
    /// For a selection move, alterations[1] holds the newly selected
    /// variable; assumes selection moves always carry at least two
    /// alterations — TODO confirm against the move generator.
    if (a_MOVE.sense == neighborhood::MoveSense::Selection) {
        a_MOVE.alterations[1].first->select();
    }
    for (auto &&variable_ptr :
         m_variable_reference.intermediate_variable_ptrs) {
        variable_ptr->update_as_intermediate_variable();
        variable_ptr->dependent_constraint_ptr()->update();
    }
    this->update_feasibility();
}
/*************************************************************************/
/**
 * Clear the objective-improvability flag of each given variable.
 */
inline constexpr void reset_variable_objective_improvability(
    const std::vector<model_component::Variable<T_Variable, T_Expression> *>
        &a_VARIABLE_PTRS) {
    for (const auto &target_ptr : a_VARIABLE_PTRS) {
        target_ptr->set_is_objective_improvable(false);
    }
}
/*************************************************************************/
/**
 * Clear the objective-improvability flag of every variable in the model.
 */
inline constexpr void reset_variable_objective_improvability(void) {
    this->reset_variable_objective_improvability(
        this->variable_reference().variable_ptrs);
}
/*************************************************************************/
/**
 * Clear the feasibility-improvability flag of each given variable.
 */
inline constexpr void reset_variable_feasibility_improvability(
    const std::vector<model_component::Variable<T_Variable, T_Expression> *>
        &a_VARIABLE_PTRS) const noexcept {
    for (const auto &target_ptr : a_VARIABLE_PTRS) {
        target_ptr->set_is_feasibility_improvable(false);
    }
}
/*************************************************************************/
/**
 * Clear the feasibility-improvability flag of every variable appearing
 * in an enabled constraint among the given ones. Disabled constraints
 * are ignored.
 */
inline constexpr void reset_variable_feasibility_improvability(
    const std::vector<model_component::Constraint<T_Variable, T_Expression>
                          *> &a_CONSTRAINT_PTRS) const noexcept {
    for (const auto &constraint_ptr : a_CONSTRAINT_PTRS) {
        if (!constraint_ptr->is_enabled()) {
            continue;
        }
        for (const auto &sensitivity :
             constraint_ptr->expression().sensitivities()) {
            sensitivity.first->set_is_feasibility_improvable(false);
        }
    }
}
/*************************************************************************/
/**
 * Clear the feasibility-improvability flag of every variable in the
 * model.
 */
inline constexpr void reset_variable_feasibility_improvability(void) {
    this->reset_variable_feasibility_improvability(
        this->variable_reference().variable_ptrs);
}
/*************************************************************************/
/**
 * Recompute the objective-improvability flag for all mutable variables.
 */
inline constexpr void update_variable_objective_improvability(void) {
    this->update_variable_objective_improvability(
        this->variable_reference().mutable_variable_ptrs);
}
/*************************************************************************/
/**
 * Recompute the objective-improvability flag for the given variables.
 * A variable is objective-improvable iff moving it one step toward the
 * sign-adjusted descent direction stays within its bounds: positive
 * sensitivity with room below, or negative sensitivity with room above.
 */
constexpr void update_variable_objective_improvability(
    const std::vector<model_component::Variable<T_Variable, T_Expression> *>
        &a_VARIABLE_PTRS) const noexcept {
    for (const auto &variable_ptr : a_VARIABLE_PTRS) {
        const auto SIGNED_SENSITIVITY =
            variable_ptr->objective_sensitivity() * this->sign();
        const bool IS_IMPROVABLE =
            (SIGNED_SENSITIVITY > 0 &&
             variable_ptr->has_lower_bound_margin()) ||
            (SIGNED_SENSITIVITY < 0 &&
             variable_ptr->has_upper_bound_margin());
        variable_ptr->set_is_objective_improvable(IS_IMPROVABLE);
    }
}
/*************************************************************************/
/**
 * Recompute the feasibility-improvability flag using all enabled
 * constraints.
 */
inline constexpr void update_variable_feasibility_improvability(void) {
    this->update_variable_feasibility_improvability(
        this->constraint_reference().enabled_constraint_ptrs);
}
/*************************************************************************/
/**
 * Mark as feasibility-improvable every unfixed variable that could
 * reduce the violation of one of the given (violated) constraints by
 * moving within its bounds. Flags are only set here, never cleared;
 * call reset_variable_feasibility_improvability() first.
 */
constexpr void update_variable_feasibility_improvability(
    const std::vector<model_component::Constraint<T_Variable, T_Expression>
                          *> &a_CONSTRAINT_PTRS) const noexcept {
    for (const auto &constraint_ptr : a_CONSTRAINT_PTRS) {
        /// Skip constraints that are (numerically) satisfied.
        if (constraint_ptr->violation_value() < constant::EPSILON) {
            continue;
        }
        const auto &sensitivities =
            constraint_ptr->expression().sensitivities();
        const auto &constraint_value = constraint_ptr->constraint_value();
        if (constraint_value > constant::EPSILON &&
            constraint_ptr->is_less_or_equal()) {
            /// g(x) <= 0 violated from above: decreasing g helps, so a
            /// positive coefficient needs room below the variable and a
            /// negative one room above.
            for (const auto &sensitivity : sensitivities) {
                const auto &variable_ptr = sensitivity.first;
                const auto &coefficient = sensitivity.second;
                if (variable_ptr->is_feasibility_improvable() ||
                    variable_ptr->is_fixed()) {
                    continue;
                }
                if (coefficient > 0 &&
                    variable_ptr->has_lower_bound_margin()) {
                    variable_ptr->set_is_feasibility_improvable(true);
                } else if (coefficient < 0 &&
                           variable_ptr->has_upper_bound_margin()) {
                    variable_ptr->set_is_feasibility_improvable(true);
                }
            }
        } else if (constraint_value < -constant::EPSILON &&
                   constraint_ptr->is_greater_or_equal()) {
            /// g(x) >= 0 violated from below: the mirror case —
            /// increasing g helps, so the margin tests are swapped.
            for (const auto &sensitivity : sensitivities) {
                const auto &variable_ptr = sensitivity.first;
                const auto &coefficient = sensitivity.second;
                if (variable_ptr->is_feasibility_improvable() ||
                    variable_ptr->is_fixed()) {
                    continue;
                }
                if (coefficient > 0 &&
                    variable_ptr->has_upper_bound_margin()) {
                    variable_ptr->set_is_feasibility_improvable(true);
                } else if (coefficient < 0 &&
                           variable_ptr->has_lower_bound_margin()) {
                    variable_ptr->set_is_feasibility_improvable(true);
                }
            }
        }
    }
}
/*************************************************************************/
inline constexpr void update_feasibility(void) {
for (const auto &proxy : m_constraint_proxies) {
for (const auto &constraint : proxy.flat_indexed_constraints()) {
if (constraint.violation_value() > constant::EPSILON) {
m_is_feasible = false;
return;
}
}
}
m_is_feasible = true;
}
/*************************************************************************/
/**
 * Evaluate the given move from scratch and return the resulting score
 * by value (convenience wrapper over the pointer-based overload).
 */
inline solution::SolutionScore evaluate(
    const neighborhood::Move<T_Variable, T_Expression> &a_MOVE) const
    noexcept {
    solution::SolutionScore score;
    this->evaluate(&score, a_MOVE);
    return score;
}
/*************************************************************************/
/**
 * Evaluate the given move differentially against the current score and
 * return the result by value (wrapper over the pointer-based overload).
 */
inline solution::SolutionScore evaluate(
    const neighborhood::Move<T_Variable, T_Expression> &a_MOVE,
    const solution::SolutionScore &a_CURRENT_SCORE) const noexcept {
    solution::SolutionScore score;
    this->evaluate(&score, a_MOVE, a_CURRENT_SCORE);
    return score;
}
/*************************************************************************/
/**
 * Evaluate the given move from scratch: every enabled constraint is
 * re-evaluated. See the overload taking a_CURRENT_SCORE for the
 * differential (faster) version.
 * @param a_score_ptr output score (all fields overwritten).
 * @param a_MOVE move to evaluate.
 */
constexpr void evaluate(
    solution::SolutionScore * a_score_ptr,  //
    const neighborhood::Move<T_Variable, T_Expression> &a_MOVE) const
    noexcept {
    double total_violation = 0.0;
    double local_penalty = 0.0;
    double global_penalty = 0.0;
    const int CONSTRAINT_PROXIES_SIZE = m_constraint_proxies.size();
    bool is_feasibility_improvable = false;
    for (auto i = 0; i < CONSTRAINT_PROXIES_SIZE; i++) {
        auto &constraints =
            m_constraint_proxies[i].flat_indexed_constraints();
        const int CONSTRAINTS_SIZE = constraints.size();
        for (auto j = 0; j < CONSTRAINTS_SIZE; j++) {
            if (!constraints[j].is_enabled()) {
                continue;
            }
            double constraint_value =
                constraints[j].evaluate_constraint(a_MOVE);
            double positive_part = std::max(constraint_value, 0.0);
            double negative_part = std::max(-constraint_value, 0.0);
            double violation = 0.0;
            double local_penalty_coefficient = 0.0;
            /// A "less" constraint is violated by its positive part and
            /// a "greater" one by its negative part; each side carries
            /// its own local penalty coefficient.
            if (constraints[j].is_less_or_equal() && positive_part > 0) {
                violation = positive_part;
                local_penalty_coefficient =
                    constraints[j].local_penalty_coefficient_less();
            } else if (constraints[j].is_greater_or_equal() &&
                       negative_part > 0) {
                violation = negative_part;
                local_penalty_coefficient =
                    constraints[j].local_penalty_coefficient_greater();
            }
            /// Improvable if this constraint's violation strictly
            /// decreases (beyond EPSILON) relative to the current state.
            if (violation + constant::EPSILON <
                constraints[j].violation_value()) {
                is_feasibility_improvable = true;
            }
            total_violation += violation;
            local_penalty += violation * local_penalty_coefficient;
            global_penalty +=
                violation * constraints[j].global_penalty_coefficient();
        }
    }
    double objective = 0.0;
    double objective_improvement = 0.0;
    /// sign() maps the stored objective to the minimization direction.
    if (m_is_defined_objective) {
        objective = m_objective.evaluate(a_MOVE) * this->sign();
        objective_improvement =
            m_objective.value() * this->sign() - objective;
    }
    a_score_ptr->objective = objective;
    a_score_ptr->objective_improvement = objective_improvement;
    a_score_ptr->total_violation = total_violation;
    a_score_ptr->local_penalty = local_penalty;
    a_score_ptr->global_penalty = global_penalty;
    a_score_ptr->local_augmented_objective = objective + local_penalty;
    a_score_ptr->global_augmented_objective = objective + global_penalty;
    a_score_ptr->is_feasible = !(total_violation > constant::EPSILON);
    a_score_ptr->is_objective_improvable =
        objective_improvement > constant::EPSILON;
    a_score_ptr->is_feasibility_improvable = is_feasibility_improvable;
}
/*************************************************************************/
/**
 * Evaluate the given move differentially: starting from the current
 * score, only the constraints related to the move are re-evaluated and
 * their violation deltas applied.
 * @param a_score_ptr output score (all fields overwritten).
 * @param a_MOVE move to evaluate.
 * @param a_CURRENT_SCORE score of the current (pre-move) state.
 */
constexpr void evaluate(
    solution::SolutionScore * a_score_ptr,  //
    const neighborhood::Move<T_Variable, T_Expression> &a_MOVE,
    const solution::SolutionScore &a_CURRENT_SCORE) const noexcept {
    bool is_feasibility_improvable = false;
    double total_violation = a_CURRENT_SCORE.total_violation;
    double local_penalty = a_CURRENT_SCORE.local_penalty;
    double global_penalty = a_CURRENT_SCORE.global_penalty;
    double constraint_value = 0.0;
    double positive_part = 0.0;
    double negative_part = 0.0;
    double violation_diff = 0.0;
    for (const auto &constraint_ptr : a_MOVE.related_constraint_ptrs) {
        if (!constraint_ptr->is_enabled()) {
            continue;
        }
        /// Fast paths for single-variable moves: the new constraint
        /// value can be derived from the cached value without a full
        /// expression re-evaluation.
        if (a_MOVE.is_univariable_move) {
            auto variable_ptr = a_MOVE.alterations.front().first;
            auto variable_value_target = a_MOVE.alterations.front().second;
            if (constraint_ptr->is_binary()) {
                /// Binary constraint: coefficient magnitude 1, so the
                /// delta equals the variable's value change.
                constraint_value = constraint_ptr->constraint_value() +
                                   variable_value_target -
                                   variable_ptr->value();
            } else if (variable_ptr->has_uniform_sensitivity()) {
                /// Same coefficient in every constraint: scale the
                /// value change by that coefficient.
                constraint_value =
                    constraint_ptr->constraint_value() +
                    variable_ptr->uniform_sensitivity() *
                        (variable_value_target - variable_ptr->value());
            } else {
                constraint_value =
                    constraint_ptr->evaluate_constraint_with_mask(
                        variable_ptr, variable_value_target);
            }
        } else {
            constraint_value = constraint_ptr->evaluate_constraint(a_MOVE);
        }
        positive_part = std::max(constraint_value, 0.0);
        negative_part = std::max(-constraint_value, 0.0);
        /// Apply the violation delta of each violated side; equality
        /// constraints contribute through both branches.
        if (constraint_ptr->is_less_or_equal()) {
            violation_diff =
                positive_part - constraint_ptr->positive_part();
            total_violation += violation_diff;
            is_feasibility_improvable |=
                violation_diff < -constant::EPSILON;
            local_penalty +=
                violation_diff *
                constraint_ptr->local_penalty_coefficient_less();
            global_penalty += violation_diff *
                              constraint_ptr->global_penalty_coefficient();
        }
        if (constraint_ptr->is_greater_or_equal()) {
            violation_diff =
                negative_part - constraint_ptr->negative_part();
            total_violation += violation_diff;
            is_feasibility_improvable |=
                violation_diff < -constant::EPSILON;
            local_penalty +=
                violation_diff *
                constraint_ptr->local_penalty_coefficient_greater();
            global_penalty += violation_diff *
                              constraint_ptr->global_penalty_coefficient();
        }
    }
    double objective = 0.0;
    double objective_improvement = 0.0;
    /// sign() maps the stored objective to the minimization direction.
    if (m_is_defined_objective) {
        objective = m_objective.evaluate(a_MOVE) * this->sign();
        objective_improvement =
            m_objective.value() * this->sign() - objective;
    }
    a_score_ptr->objective = objective;
    a_score_ptr->objective_improvement = objective_improvement;
    a_score_ptr->total_violation = total_violation;
    a_score_ptr->local_penalty = local_penalty;
    a_score_ptr->global_penalty = global_penalty;
    a_score_ptr->local_augmented_objective = objective + local_penalty;
    a_score_ptr->global_augmented_objective = objective + global_penalty;
    a_score_ptr->is_feasible = !(total_violation > constant::EPSILON);
    a_score_ptr->is_objective_improvable =
        objective_improvement > constant::EPSILON;
    a_score_ptr->is_feasibility_improvable = is_feasibility_improvable;
}
/*************************************************************************/
/**
 * Compute the Lagrangian L(x, u) = f(x) + sum_i u_i * g_i(x), where u_i
 * is the multiplier associated with constraint i (looked up by proxy
 * index and flat index).
 */
constexpr double compute_lagrangian(
    const std::vector<multi_array::ValueProxy<double>>
        &a_LAGRANGE_MULTIPLIER_PROXIES) const noexcept {
    double result = m_objective.value();
    for (const auto &constraint_ptr :
         m_constraint_reference.constraint_ptrs) {
        const double MULTIPLIER =
            a_LAGRANGE_MULTIPLIER_PROXIES[constraint_ptr->proxy_index()]
                .flat_indexed_values(constraint_ptr->flat_index());
        result += MULTIPLIER * constraint_ptr->constraint_value();
    }
    return result;
}
/*************************************************************************/
template <class T_Value>
constexpr std::vector<multi_array::ValueProxy<T_Value>>
generate_variable_parameter_proxies(const T_Value a_VALUE) const {
std::vector<multi_array::ValueProxy<T_Value>>
variable_parameter_proxies;
for (const auto &proxy : m_variable_proxies) {
multi_array::ValueProxy<T_Value> variable_parameter_proxy(
proxy.index(), proxy.shape());
variable_parameter_proxy.fill(a_VALUE);
int number_of_elements = proxy.number_of_elements();
for (auto i = 0; i < number_of_elements; i++) {
variable_parameter_proxy.flat_indexed_names(i) =
proxy.flat_indexed_variables(i).name();
}
variable_parameter_proxies.push_back(variable_parameter_proxy);
}
return variable_parameter_proxies;
}
/*************************************************************************/
template <class T_Value>
constexpr std::vector<multi_array::ValueProxy<T_Value>>
generate_expression_parameter_proxies(const T_Value a_VALUE) const {
std::vector<multi_array::ValueProxy<T_Value>>
expression_parameter_proxies;
for (const auto &proxy : m_expression_proxies) {
multi_array::ValueProxy<T_Value> expression_parameter_proxy(
proxy.index(), proxy.shape());
expression_parameter_proxy.fill(a_VALUE);
int number_of_elements = proxy.number_of_elements();
for (auto i = 0; i < number_of_elements; i++) {
expression_parameter_proxy.flat_indexed_names(i) =
proxy.flat_indexed_expressions(i).name();
}
expression_parameter_proxies.push_back(expression_parameter_proxy);
}
return expression_parameter_proxies;
}
/*************************************************************************/
template <class T_Value>
constexpr std::vector<multi_array::ValueProxy<T_Value>>
generate_constraint_parameter_proxies(const T_Value a_VALUE) const {
std::vector<multi_array::ValueProxy<T_Value>>
constraint_parameter_proxies;
for (const auto &proxy : m_constraint_proxies) {
multi_array::ValueProxy<T_Value> constraint_parameter_proxy(
proxy.index(), proxy.shape());
constraint_parameter_proxy.fill(a_VALUE);
int number_of_elements = proxy.number_of_elements();
for (auto i = 0; i < number_of_elements; i++) {
constraint_parameter_proxy.flat_indexed_names(i) =
proxy.flat_indexed_constraints(i).name();
}
constraint_parameter_proxies.push_back(constraint_parameter_proxy);
}
return constraint_parameter_proxies;
}
/*************************************************************************/
/**
 * Export the local penalty coefficients of all constraints as value
 * proxies (one per constraint proxy), with element names taken from the
 * constraint names. The exported value of each constraint is the larger
 * of its "less"-side and "greater"-side coefficients.
 */
std::vector<multi_array::ValueProxy<double>>
export_local_penalty_coefficient_proxies(void) const {
    std::vector<multi_array::ValueProxy<double>>
        local_penalty_coefficient_proxies;
    for (const auto &proxy : m_constraint_proxies) {
        multi_array::ValueProxy<double> local_penalty_coefficient_proxy(
            proxy.index(), proxy.shape());
        const int NUMBER_OF_ELEMENTS = proxy.number_of_elements();
        for (auto i = 0; i < NUMBER_OF_ELEMENTS; i++) {
            const auto &constraint = proxy.flat_indexed_constraints(i);
            local_penalty_coefficient_proxy.flat_indexed_names(i) =
                constraint.name();
            /**
             * Bug fix: the original compared the "less"-side coefficient
             * against itself, so the "greater"-side coefficient was
             * never exported. Take the max of both sides as intended.
             */
            local_penalty_coefficient_proxy.flat_indexed_values(i) =
                std::max(constraint.local_penalty_coefficient_less(),
                         constraint.local_penalty_coefficient_greater());
        }
        local_penalty_coefficient_proxies.push_back(
            local_penalty_coefficient_proxy);
    }
    return local_penalty_coefficient_proxies;
}
/*************************************************************************/
/**
 * Export the current model state as a dense solution: variable,
 * expression, constraint, and violation value proxies, plus the
 * objective value, total violation, and feasibility flag.
 */
solution::DenseSolution<T_Variable, T_Expression> export_solution(
    void) const {
    /// This method cannot be constexpr by clang.
    solution::DenseSolution<T_Variable, T_Expression> result;
    /// Decision variables
    for (const auto &variable_proxy : m_variable_proxies) {
        result.variable_value_proxies.push_back(
            variable_proxy.export_values_and_names());
    }
    /// Expressions
    for (const auto &expression_proxy : m_expression_proxies) {
        result.expression_value_proxies.push_back(
            expression_proxy.export_values_and_names());
    }
    /// Constraints, violations, and the total violation in one pass.
    T_Expression total_violation = 0;
    for (const auto &constraint_proxy : m_constraint_proxies) {
        result.constraint_value_proxies.push_back(
            constraint_proxy.export_values_and_names());
        result.violation_value_proxies.push_back(
            constraint_proxy.export_violations_and_names());
        for (const auto &constraint :
             constraint_proxy.flat_indexed_constraints()) {
            total_violation += constraint.violation_value();
        }
    }
    result.objective = m_objective.value();
    result.total_violation = total_violation;
    result.is_feasible = this->is_feasible();
    return result;
}
/*************************************************************************/
/**
 * Export the current model state as a named solution (dense export
 * followed by name-keyed conversion).
 */
constexpr solution::NamedSolution<T_Variable, T_Expression>
export_named_solution(void) const {
    return this->convert_to_named_solution(this->export_solution());
}
/*************************************************************************/
/**
 * Convert a dense solution into a named solution whose proxies are
 * keyed by the proxy names recorded in this model.
 */
solution::NamedSolution<T_Variable, T_Expression> convert_to_named_solution(
    const solution::DenseSolution<T_Variable, T_Expression> &a_SOLUTION)
    const {
    /// This method cannot be constexpr by clang.
    solution::NamedSolution<T_Variable, T_Expression> named_solution;
    const int VARIABLE_PROXIES_SIZE = m_variable_proxies.size();
    const int EXPRESSION_PROXIES_SIZE = m_expression_proxies.size();
    const int CONSTRAINT_PROXIES_SIZE = m_constraint_proxies.size();
    /// Decision variables
    for (auto i = 0; i < VARIABLE_PROXIES_SIZE; i++) {
        named_solution.m_variable_value_proxies[m_variable_names[i]] =
            a_SOLUTION.variable_value_proxies[i];
    }
    /// Expressions
    for (auto i = 0; i < EXPRESSION_PROXIES_SIZE; i++) {
        named_solution.m_expression_value_proxies[m_expression_names[i]] =
            a_SOLUTION.expression_value_proxies[i];
    }
    /// Constraints and violations share the same name table.
    for (auto i = 0; i < CONSTRAINT_PROXIES_SIZE; i++) {
        const auto &KEY = m_constraint_names[i];
        named_solution.m_constraint_value_proxies[KEY] =
            a_SOLUTION.constraint_value_proxies[i];
        named_solution.m_violation_value_proxies[KEY] =
            a_SOLUTION.violation_value_proxies[i];
    }
    named_solution.m_name = m_name;
    named_solution.m_number_of_variables = this->number_of_variables();
    named_solution.m_number_of_constraints = this->number_of_constraints();
    named_solution.m_objective = a_SOLUTION.objective;
    named_solution.m_total_violation = a_SOLUTION.total_violation;
    named_solution.m_is_feasible = a_SOLUTION.is_feasible;
    return named_solution;
}
/*************************************************************************/
/**
 * Export the current model state as a sparse solution: only variables
 * with nonzero values are recorded, keyed by name.
 */
solution::SparseSolution<T_Variable, T_Expression> export_plain_solution(
    void) const {
    solution::SparseSolution<T_Variable, T_Expression> result;
    /// Decision variables (nonzero only, sparse representation)
    for (const auto &variable_proxy : m_variable_proxies) {
        for (const auto &variable :
             variable_proxy.flat_indexed_variables()) {
            const auto VALUE = variable.value();
            if (VALUE != 0) {
                result.variables[variable.name()] = VALUE;
            }
        }
    }
    /// Total violation over all constraints
    T_Expression total_violation = 0;
    for (const auto &constraint_proxy : m_constraint_proxies) {
        for (const auto &constraint :
             constraint_proxy.flat_indexed_constraints()) {
            total_violation += constraint.violation_value();
        }
    }
    result.objective = m_objective.value();
    result.total_violation = total_violation;
    result.is_feasible = this->is_feasible();
    return result;
}
/*************************************************************************/
/**
 * Convert a dense solution into a sparse one. Variable values are taken
 * from this model's current state (nonzero only); objective, total
 * violation, and feasibility come from a_SOLUTION.
 */
solution::SparseSolution<T_Variable, T_Expression>
convert_to_plain_solution(
    const solution::DenseSolution<T_Variable, T_Expression> &a_SOLUTION)
    const {
    solution::SparseSolution<T_Variable, T_Expression> result;
    /// Decision variables (nonzero only, sparse representation)
    for (const auto &variable_proxy : m_variable_proxies) {
        for (const auto &variable :
             variable_proxy.flat_indexed_variables()) {
            const auto VALUE = variable.value();
            if (VALUE != 0) {
                result.variables[variable.name()] = VALUE;
            }
        }
    }
    result.objective = a_SOLUTION.objective;
    result.total_violation = a_SOLUTION.total_violation;
    result.is_feasible = a_SOLUTION.is_feasible;
    return result;
}
/*************************************************************************/
constexpr void import_solution(
const std::unordered_map<std::string, int> &a_SOLUTION) {
for (auto &&proxy : m_variable_proxies) {
for (auto &&variable : proxy.flat_indexed_variables()) {
if (a_SOLUTION.find(variable.name()) != a_SOLUTION.end()) {
variable = a_SOLUTION.at(variable.name());
} else {
variable = 0;
}
}
}
}
/*************************************************************************/
constexpr void fix_variables(
const std::unordered_map<std::string, int> &a_SOLUTION) {
for (auto &&proxy : m_variable_proxies) {
for (auto &&variable : proxy.flat_indexed_variables()) {
if (a_SOLUTION.find(variable.name()) != a_SOLUTION.end()) {
variable.fix_by(a_SOLUTION.at(variable.name()));
}
}
}
}
/*************************************************************************/
constexpr void unfix_variables(
const std::unordered_set<std::string> &a_VARIABLE_NAMES) {
for (auto &&proxy : m_variable_proxies) {
for (auto &&variable : proxy.flat_indexed_variables()) {
variable.fix_by(0);
if (a_VARIABLE_NAMES.find(variable.name()) !=
a_VARIABLE_NAMES.end()) {
variable.unfix();
}
}
}
}
/*********************************************************************/
/**
 * Build this model from a parsed MPS instance: one variable proxy for
 * all columns, one constraint proxy for all rows, and the objective.
 * @param a_MPS parsed MPS data.
 * @param a_ACCEPT_CONTINUOUS if true, continuous variables are accepted
 * (with a warning) and treated as integers; otherwise std::logic_error
 * is thrown.
 */
void import_mps(const mps::MPS &a_MPS, const bool a_ACCEPT_CONTINUOUS) {
    using VariableMap = std::unordered_map<
        std::string, model_component::Variable<T_Variable, T_Expression> *>;
    using Sensitivities = std::unordered_map<
        model_component::Variable<T_Variable, T_Expression> *,
        T_Expression>;
    VariableMap variable_ptrs;
    auto &variable_proxy =
        this->create_variables("variables", a_MPS.variables.size());
    /**
     * Set up the decision variables.
     */
    int number_of_variables = a_MPS.variable_names.size();
    for (auto i = 0; i < number_of_variables; i++) {
        auto &name = a_MPS.variable_names[i];
        auto &variable = a_MPS.variables.at(name);
        if (variable.sense == mps::MPSVariableSense::Continuous) {
            if (a_ACCEPT_CONTINUOUS) {
                utility::print_warning(
                    "The continuous variable " + name +
                        " will be regarded as an integer variable.",
                    true);
            } else {
                throw std::logic_error(utility::format_error_location(
                    __FILE__, __LINE__, __func__,
                    "The MPS file includes continuous variables."));
            }
        }
        variable_proxy(i).set_bound(variable.integer_lower_bound,
                                    variable.integer_upper_bound);
        if (variable.is_fixed) {
            variable_proxy(i).fix_by(variable.integer_fixed_value);
        }
        variable_proxy(i).set_name(name);
        variable_ptrs[name] = &variable_proxy(i);
    }
    /**
     * Set up the constraints. (The unused "offsets" vector of the
     * original implementation has been removed.)
     */
    int number_of_constraints = a_MPS.constraint_names.size();
    auto &constraint_proxy =
        this->create_constraints("constraints", number_of_constraints);
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
    for (auto i = 0; i < number_of_constraints; i++) {
        auto &name = a_MPS.constraint_names[i];
        auto &constraint = a_MPS.constraints.at(name);
        auto expression =
            model_component::Expression<T_Variable,
                                        T_Expression>::create_instance();
        Sensitivities expression_sensitivities;
        /**
         * NOTE(review): variable_ptrs is looked up with operator[]
         * inside the parallel region. This is only race-free if every
         * column name referenced here was inserted above; an unknown
         * name would insert concurrently (undefined behavior) — confirm
         * malformed-file handling upstream.
         */
        for (const auto &sensitivity : constraint.sensitivities) {
            std::string variable_name = sensitivity.first;
            T_Expression coefficient =
                static_cast<T_Expression>(sensitivity.second);
            expression_sensitivities[variable_ptrs[variable_name]] =
                coefficient;
        }
        expression.set_sensitivities(expression_sensitivities);
        switch (constraint.sense) {
            case mps::MPSConstraintSense::Less: {
                constraint_proxy(i) = (expression <= constraint.rhs);
                break;
            }
            case mps::MPSConstraintSense::Equal: {
                constraint_proxy(i) = (expression == constraint.rhs);
                break;
            }
            case mps::MPSConstraintSense::Greater: {
                constraint_proxy(i) = (expression >= constraint.rhs);
                break;
            }
        }
        constraint_proxy(i).set_name(name);
    }
    /**
     * Set up the objective function (always imported as minimization
     * via minimize()).
     */
    auto objective =
        model_component::Expression<T_Variable,
                                    T_Expression>::create_instance();
    Sensitivities objective_sensitivities;
    for (const auto &sensitivity : a_MPS.objective.sensitivities) {
        std::string variable_name = sensitivity.first;
        T_Expression coefficient =
            static_cast<T_Expression>(sensitivity.second);
        objective_sensitivities[variable_ptrs[variable_name]] = coefficient;
    }
    objective.set_sensitivities(objective_sensitivities);
    this->minimize(objective);
}
/*********************************************************************/
/**
 * Write this model to a file in fixed MPS format. Only linear models are
 * supported; throws std::logic_error otherwise. All variables are
 * emitted as integers (INTORG/INTEND markers), and a maximization
 * objective is negated so the file is always a minimization instance.
 * @param a_FILE_NAME output path.
 */
void write_mps(const std::string &a_FILE_NAME) {
    std::ofstream ofs(a_FILE_NAME);
    /**
     * Determine unique name of decision variables and constraints.
     */
    this->setup_unique_name();
    /**
     * Determine the linearity.
     */
    this->setup_is_linear();
    if (!m_is_linear) {
        throw std::logic_error(utility::format_error_location(
            __FILE__, __LINE__, __func__,
            "Nonlinear model cannot be written in MPS format."));
    }
    /**
     * Determine the constraint sensitivities.
     */
    this->setup_variable_related_constraints();
    this->setup_variable_sensitivity();
    /**
     * Write instance name.
     */
    if (m_name.empty()) {
        ofs << "NAME untitled" << std::endl;
    } else {
        ofs << "NAME " << utility::delete_space(m_name) << std::endl;
    }
    /**
     * Write ROWS section. The objective row is always named "obj";
     * constraint rows are typed E/L/G by sense.
     */
    ofs << "ROWS" << std::endl;
    ofs << " N obj" << std::endl;
    for (const auto &proxy : m_constraint_proxies) {
        for (const auto &constraint : proxy.flat_indexed_constraints()) {
            switch (constraint.sense()) {
                case model_component::ConstraintSense::Equal: {
                    ofs << " E "
                        << utility::delete_space(constraint.name())
                        << std::endl;
                    break;
                }
                case model_component::ConstraintSense::Less: {
                    ofs << " L "
                        << utility::delete_space(constraint.name())
                        << std::endl;
                    break;
                }
                case model_component::ConstraintSense::Greater: {
                    ofs << " G "
                        << utility::delete_space(constraint.name())
                        << std::endl;
                    break;
                }
                default: {
                }
            }
        }
    }
    /**
     * Write COLUMNS section. All variables are wrapped in INTORG/INTEND
     * markers, i.e. declared integer.
     */
    ofs << "COLUMNS" << std::endl;
    ofs << " MARK0000 'MARKER' 'INTORG'" << std::endl;
    for (const auto &proxy : m_variable_proxies) {
        for (const auto &variable : proxy.flat_indexed_variables()) {
            auto variable_name = utility::delete_space(variable.name());
            /// Objective coefficients below EPSILON_10 are omitted; a
            /// maximization objective is negated here.
            if (fabs(variable.objective_sensitivity()) >
                constant::EPSILON_10) {
                if (m_is_minimization) {
                    ofs << " " << variable_name << " obj "
                        << variable.objective_sensitivity() << std::endl;
                } else {
                    ofs << " " << variable_name << " obj "
                        << -variable.objective_sensitivity() << std::endl;
                }
            }
            /// Constraints
            for (const auto &sensitivity :
                 variable.constraint_sensitivities()) {
                auto constraint_name =
                    utility::delete_space(sensitivity.first->name());
                auto coefficient = sensitivity.second;
                ofs << " " << variable_name << " " << constraint_name
                    << " " << coefficient << std::endl;
            }
        }
    }
    ofs << " MARK0001 'MARKER' 'INTEND'" << std::endl;
    /**
     * Write RHS section. The right-hand side is the negated constant
     * term of each constraint expression.
     */
    ofs << "RHS" << std::endl;
    for (const auto &proxy : m_constraint_proxies) {
        for (const auto &constraint : proxy.flat_indexed_constraints()) {
            auto constraint_name = utility::delete_space(constraint.name());
            auto &expression = constraint.expression();
            ofs << " rhs " << constraint_name << " "
                << -expression.constant_value() << std::endl;
        }
    }
    /**
     * Write Bounds section. Fixed variables use FX; otherwise LO is
     * written only for finite nonzero lower bounds (0 is the MPS
     * default) and UP for finite upper bounds. INT_HALF_MIN/MAX act as
     * the "unbounded" sentinels.
     */
    ofs << "BOUNDS" << std::endl;
    for (const auto &proxy : m_variable_proxies) {
        for (const auto &variable : proxy.flat_indexed_variables()) {
            auto variable_name = utility::delete_space(variable.name());
            if (variable.is_fixed()) {
                ofs << " FX bnd " << variable_name << " "
                    << variable.value() << std::endl;
            } else {
                if (variable.lower_bound() != constant::INT_HALF_MIN) {
                    if (variable.lower_bound() != 0) {
                        ofs << " LO bnd " << variable_name
                            << " " << variable.lower_bound()
                            << std::endl;
                    }
                }
                if (variable.upper_bound() != constant::INT_HALF_MAX) {
                    ofs << " UP bnd " << variable_name << " "
                        << variable.upper_bound() << std::endl;
                }
            }
        }
    }
    /**
     * Write END section.
     */
    ofs << "ENDATA" << std::endl;
    ofs.close();
}
/*************************************************************************/
/**
 * Returns a mutable reference to the variable proxies of the model.
 */
inline constexpr std::vector<
    model_component::VariableProxy<T_Variable, T_Expression>>
    &variable_proxies(void) {
    return m_variable_proxies;
}
/*************************************************************************/
/**
 * Returns a read-only reference to the variable proxies of the model.
 */
inline constexpr const std::vector<
    model_component::VariableProxy<T_Variable, T_Expression>>
    &variable_proxies(void) const {
    return m_variable_proxies;
}
/*************************************************************************/
inline constexpr std::vector<
model_component::ExpressionProxy<T_Variable, T_Expression>>
&expression_proxies(void) {
return m_expression_proxies;
}
/*************************************************************************/
inline constexpr const std::vector<
model_component::ExpressionProxy<T_Variable, T_Expression>>
&expression_proxies(void) const {
return m_expression_proxies;
}
/*************************************************************************/
inline constexpr std::vector<
model_component::ConstraintProxy<T_Variable, T_Expression>>
&constraint_proxies(void) {
return m_constraint_proxies;
}
/*************************************************************************/
inline constexpr const std::vector<
model_component::ConstraintProxy<T_Variable, T_Expression>>
&constraint_proxies(void) const {
return m_constraint_proxies;
}
/*************************************************************************/
inline constexpr model_component::Objective<T_Variable, T_Expression>
&objective(void) {
return m_objective;
}
/*************************************************************************/
inline constexpr const model_component::Objective<T_Variable, T_Expression>
&objective(void) const {
return m_objective;
}
/*************************************************************************/
inline constexpr const std::vector<std::string> &variable_names(
void) const {
return m_variable_names;
}
/*************************************************************************/
inline constexpr const std::vector<std::string> &expression_names(
void) const {
return m_expression_names;
}
/*************************************************************************/
inline constexpr const std::vector<std::string> &constraint_names(
void) const {
return m_constraint_names;
}
/*************************************************************************/
inline constexpr const std::vector<
model_component::Selection<T_Variable, T_Expression>>
&selections(void) const {
return m_selections;
}
/*************************************************************************/
inline constexpr model_component::VariableReference<T_Variable,
T_Expression>
&variable_reference(void) {
return m_variable_reference;
}
/*************************************************************************/
inline constexpr const model_component::VariableReference<T_Variable,
T_Expression>
&variable_reference(void) const {
return m_variable_reference;
}
/*************************************************************************/
inline constexpr model_component::ConstraintReference<T_Variable,
T_Expression>
&constraint_reference(void) {
return m_constraint_reference;
}
/*************************************************************************/
inline constexpr const model_component::ConstraintReference<T_Variable,
T_Expression>
&constraint_reference(void) const {
return m_constraint_reference;
}
/*************************************************************************/
inline constexpr model_component::ConstraintTypeReference<T_Variable,
T_Expression>
&constraint_type_reference(void) {
return m_constraint_type_reference;
}
/*************************************************************************/
inline constexpr const model_component::ConstraintTypeReference<
T_Variable, T_Expression>
&constraint_type_reference(void) const {
return m_constraint_type_reference;
}
/*************************************************************************/
inline constexpr bool is_defined_objective(void) const {
return m_is_defined_objective;
}
/*************************************************************************/
inline constexpr bool is_enabled_fast_evaluation(void) const {
return m_is_enabled_fast_evaluation;
}
/*************************************************************************/
inline constexpr bool is_linear(void) const {
return m_is_linear;
}
/*************************************************************************/
inline constexpr bool is_minimization(void) const {
return m_is_minimization;
}
/*************************************************************************/
/**
 * Returns the sign factor (+1.0 or -1.0) applied to internal objective
 * values when showing them to the user.
 */
inline constexpr double sign(void) const {
    /**
     * In this program, maximization problems are solved as minimization
     * problems by negating the objective function values. This method
     * is used to show objective function values for output.
     */
    return m_is_minimization ? 1.0 : -1.0;
}
/*************************************************************************/
inline constexpr void set_is_solved(const bool a_IS_SOLVED) {
m_is_solved = a_IS_SOLVED;
}
/*************************************************************************/
inline constexpr bool is_solved(void) const {
return m_is_solved;
}
/*************************************************************************/
inline constexpr bool is_feasible(void) const {
return m_is_feasible;
}
/*************************************************************************/
/**
 * Returns the total number of decision variables in the model.
 * The cast makes the size_t -> int narrowing explicit (silences
 * -Wconversion and documents the intended truncation).
 */
inline constexpr int number_of_variables(void) const {
    return static_cast<int>(m_variable_reference.variable_ptrs.size());
}
/*************************************************************************/
inline constexpr int number_of_fixed_variables(void) const {
return m_variable_reference.fixed_variable_ptrs.size();
}
/*************************************************************************/
inline constexpr int number_of_mutable_variables(void) const {
return m_variable_reference.mutable_variable_ptrs.size();
}
/*************************************************************************/
inline constexpr int number_of_binary_variables(void) const {
return m_variable_reference.binary_variable_ptrs.size();
}
/*************************************************************************/
inline constexpr int number_of_integer_variables(void) const {
return m_variable_reference.integer_variable_ptrs.size();
}
/*************************************************************************/
inline constexpr int number_of_selection_variables(void) const {
return m_variable_reference.selection_variable_ptrs.size();
}
/*************************************************************************/
inline constexpr int number_of_min_max_variables(void) const {
return m_variable_reference.min_max_variable_ptrs.size();
}
/*************************************************************************/
inline constexpr int number_of_max_min_variables(void) const {
return m_variable_reference.max_min_variable_ptrs.size();
}
/*************************************************************************/
inline constexpr int number_of_intermediate_variables(void) const {
return m_variable_reference.intermediate_variable_ptrs.size();
}
/*************************************************************************/
/**
 * Returns the total number of constraints in the model.
 * The cast makes the size_t -> int narrowing explicit (silences
 * -Wconversion and documents the intended truncation).
 */
inline constexpr int number_of_constraints(void) const {
    return static_cast<int>(m_constraint_reference.constraint_ptrs.size());
}
/*************************************************************************/
inline constexpr int number_of_selection_constraints(void) const {
return m_selections.size();
}
/*************************************************************************/
inline constexpr int number_of_enabled_constraints(void) const {
return m_constraint_reference.enabled_constraint_ptrs.size();
}
/*************************************************************************/
inline constexpr int number_of_disabled_constraints(void) const {
return m_constraint_reference.disabled_constraint_ptrs.size();
}
/*************************************************************************/
/**
 * Returns true if the model contains at least one constraint of a family
 * whose coefficients are all zero or one: set partitioning, set packing,
 * set covering, cardinality, or invariant knapsack.
 */
inline constexpr bool has_zero_one_coefficient_constraints(void) const {
    const auto &reference = m_constraint_type_reference;
    return reference.set_partitioning_ptrs.size() > 0     //
           || reference.set_packing_ptrs.size() > 0       //
           || reference.set_covering_ptrs.size() > 0      //
           || reference.cardinality_ptrs.size() > 0       //
           || reference.invariant_knapsack_ptrs.size() > 0;
}
/*************************************************************************/
/**
 * Returns a mutable reference to the neighborhood structure of the model.
 */
inline constexpr neighborhood::Neighborhood<T_Variable, T_Expression>
    &neighborhood(void) {
    return m_neighborhood;
}
};
using IPModel = Model<int, double>;
} // namespace model
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
post_utilities.h | #ifndef POST_UTILITIES_H
#define POST_UTILITIES_H
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/variables.h"
#include "custom_utilities/create_and_destroy.h"
#include "custom_utilities/GeometryFunctions.h"
#include "custom_elements/Particle_Contact_Element.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "utilities/openmp_utils.h"
#include <limits>
#include <iostream>
#include <iomanip>
#include <cmath>
namespace Kratos {
class PostUtilities {
public:
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::NodesContainerType NodesContainerType;
KRATOS_CLASS_POINTER_DEFINITION(PostUtilities);
/// Default constructor.
PostUtilities() {};
/// Destructor.
virtual ~PostUtilities() {};
void AddModelPartToModelPart(ModelPart& rCompleteModelPart, ModelPart& rModelPartToAdd)
{
////WATCH OUT! This function respects the existing Id's!
KRATOS_TRY;
//preallocate the memory needed
int tot_nodes = rCompleteModelPart.Nodes().size() + rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().size();
int tot_elements = rCompleteModelPart.Elements().size() + rModelPartToAdd.GetCommunicator().LocalMesh().Elements().size();
rCompleteModelPart.Nodes().reserve(tot_nodes);
rCompleteModelPart.Elements().reserve(tot_elements);
for (ModelPart::NodesContainerType::ptr_iterator node_it = rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_begin(); node_it != rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_end(); node_it++)
{
rCompleteModelPart.Nodes().push_back(*node_it);
}
for (ModelPart::ElementsContainerType::ptr_iterator elem_it = rModelPartToAdd.GetCommunicator().LocalMesh().Elements().ptr_begin(); elem_it != rModelPartToAdd.GetCommunicator().LocalMesh().Elements().ptr_end(); elem_it++)
{
rCompleteModelPart.Elements().push_back(*elem_it);
}
KRATOS_CATCH("");
}
void AddSpheresNotBelongingToClustersToMixModelPart(ModelPart& rCompleteModelPart, ModelPart& rModelPartToAdd)
{
////WATCH OUT! This function respects the existing Id's!
KRATOS_TRY;
//preallocate the memory needed
int tot_size = rCompleteModelPart.Nodes().size();
for (ModelPart::NodesContainerType::ptr_iterator node_it = rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_begin(); node_it != rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_end(); node_it++)
{
ModelPart::NodeIterator i_iterator = node_it;
Node < 3 > & i = *i_iterator;
if (i.IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {tot_size += 1;}
}
rCompleteModelPart.Nodes().reserve(tot_size);
rCompleteModelPart.Elements().reserve(tot_size);
for (ModelPart::NodesContainerType::ptr_iterator node_it = rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_begin(); node_it != rModelPartToAdd.GetCommunicator().LocalMesh().Nodes().ptr_end(); node_it++)
{
ModelPart::NodeIterator i_iterator = node_it;
Node < 3 > & i = *i_iterator;
if (i.IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {rCompleteModelPart.Nodes().push_back(*node_it);}
}
for (ModelPart::ElementsContainerType::ptr_iterator elem_it = rModelPartToAdd.GetCommunicator().LocalMesh().Elements().ptr_begin(); elem_it != rModelPartToAdd.GetCommunicator().LocalMesh().Elements().ptr_end(); elem_it++)
{
Node < 3 >& i = (*elem_it)->GetGeometry()[0];
if (i.IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {rCompleteModelPart.Elements().push_back(*elem_it);}
}
KRATOS_CATCH("");
}
/**
 * Computes the average velocity of all locally-owned elements whose first
 * node lies inside the axis-aligned box [low_point, high_point].
 * Returns a zero vector when no element falls inside the box.
 * Throws via KRATOS_ERROR_IF when the box limits are inverted.
 */
array_1d<double,3> VelocityTrap(ModelPart& rModelPart, const array_1d<double,3>& low_point, const array_1d<double,3>& high_point) {

    // Validate the box once, up front; the previous version performed this
    // check inside the parallel region, repeating it once per thread.
    for (int i = 0; i < 3; ++i) {
        KRATOS_ERROR_IF(high_point[i] < low_point[i]) << "Check the limits of the Velocity Trap Box. Maximum coordinates smaller than minimum coordinates." << std::endl;
    }

    ElementsArrayType& pElements = rModelPart.GetCommunicator().LocalMesh().Elements();
    OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), pElements.size(), this->GetElementPartition());

    double velocity_X = 0.0, velocity_Y = 0.0, velocity_Z = 0.0;
    int number_of_elements = 0;

    #pragma omp parallel for reduction(+: velocity_X, velocity_Y, velocity_Z, number_of_elements)
    for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++) {
        ElementsArrayType::iterator it_begin = pElements.ptr_begin() + this->GetElementPartition()[k];
        ElementsArrayType::iterator it_end = pElements.ptr_begin() + this->GetElementPartition()[k + 1];
        for (ElementsArrayType::iterator it = it_begin; it != it_end; ++it) {
            array_1d<double,3> coor = (it)->GetGeometry()[0].Coordinates();
            if (coor[0] >= low_point[0] && coor[0] <= high_point[0] &&
                coor[1] >= low_point[1] && coor[1] <= high_point[1] &&
                coor[2] >= low_point[2] && coor[2] <= high_point[2]) {
                velocity_X += (it)->GetGeometry()[0].FastGetSolutionStepValue(VELOCITY_X);
                velocity_Y += (it)->GetGeometry()[0].FastGetSolutionStepValue(VELOCITY_Y);
                velocity_Z += (it)->GetGeometry()[0].FastGetSolutionStepValue(VELOCITY_Z);
                number_of_elements++;
            }
        } //elements for
    } //parallel for

    // Average only when at least one element was trapped, avoiding a
    // division by zero.
    if (number_of_elements) {
        velocity_X /= number_of_elements;
        velocity_Y /= number_of_elements;
        velocity_Z /= number_of_elements;
    }

    array_1d<double,3> velocity;
    velocity[0] = velocity_X;
    velocity[1] = velocity_Y;
    velocity[2] = velocity_Z;
    return velocity;
}//VelocityTrap
/**
 * Integrates the nodal CONTACT_FORCES over mesh_nodes, accumulating the
 * resultant force into total_forces and the resulting moment about
 * rotation_center into total_moment. Neither accumulator is zeroed here;
 * callers are expected to initialize them before the call.
 */
void IntegrationOfForces(ModelPart::NodesContainerType& mesh_nodes , array_1d<double, 3>& total_forces,
                        array_1d<double, 3>& rotation_center, array_1d<double, 3>& total_moment) {
    for (ModelPart::NodesContainerType::ptr_iterator node_pointer_it = mesh_nodes.ptr_begin();
        node_pointer_it != mesh_nodes.ptr_end(); ++node_pointer_it) {
        // Force contribution of this structure node.
        const array_1d<double, 3>& contact_forces_summed_at_structure_point = (*node_pointer_it)->FastGetSolutionStepValue(CONTACT_FORCES);
        noalias(total_forces) += contact_forces_summed_at_structure_point;
        // Moment contribution: r x F, with r measured from rotation_center.
        array_1d<double, 3> vector_from_structure_center_to_structure_point;
        noalias(vector_from_structure_center_to_structure_point) = (*node_pointer_it)->Coordinates() - rotation_center;
        array_1d<double, 3> moment_to_add;
        GeometryFunctions::CrossProduct(vector_from_structure_center_to_structure_point, contact_forces_summed_at_structure_point, moment_to_add);
        noalias(total_moment) += moment_to_add;
    }
}
/**
 * Accumulates the nodal ELASTIC_FORCES of every node in mesh_nodes into
 * total_forces. The accumulator is not reset here; callers decide whether
 * to zero it beforehand.
 */
void IntegrationOfElasticForces(ModelPart::NodesContainerType& mesh_nodes, array_1d<double, 3>& total_forces) {
    const ModelPart::NodesContainerType::ptr_iterator nodes_end = mesh_nodes.ptr_end();
    for (ModelPart::NodesContainerType::ptr_iterator i_node = mesh_nodes.ptr_begin(); i_node != nodes_end; ++i_node) {
        const array_1d<double, 3> nodal_elastic_forces = (*i_node)->FastGetSolutionStepValue(ELASTIC_FORCES);
        noalias(total_forces) += nodal_elastic_forces;
    }
}
/**
 * Computes a discrete Poisson-ratio estimate for the particle assembly (3D).
 *
 * For each spherical particle, the lateral (XY) and axial (Z) strains of the
 * bonds to its neighbours are compared; the per-particle average is written
 * to the nodal variable POISSON_VALUE. The assembly-wide average is returned
 * in component 0 of the result; components 1 and 2 stay zero.
 */
array_1d<double, 3> ComputePoisson(ModelPart& rModelPart) {
    ElementsArrayType& pElements = rModelPart.GetCommunicator().LocalMesh().Elements();
    double total_poisson_value = 0.0;
    unsigned int number_of_spheres_to_evaluate_poisson = 0;
    array_1d<double, 3> return_data = ZeroVector(3);
    // TODO: Add OpenMP code
    for (unsigned int k = 0; k < pElements.size(); k++) {
        ElementsArrayType::iterator it = pElements.ptr_begin() + k;
        Element* raw_p_element = &(*it);
        SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element);
        // Skip non-spherical elements instead of dereferencing the null
        // pointer a failed dynamic_cast returns.
        if (p_sphere == NULL) continue;
        double& particle_poisson_value = p_sphere->GetGeometry()[0].FastGetSolutionStepValue(POISSON_VALUE);
        particle_poisson_value = 0.0;
        double epsilon_XY = 0.0;
        double epsilon_Z = 0.0;
        unsigned int number_of_neighbors_per_sphere_to_evaluate_poisson = 0;
        array_1d<double, 3> other_to_me_vector;
        array_1d<double, 3> initial_other_to_me_vector;
        unsigned int number_of_neighbors = p_sphere->mNeighbourElements.size();
        for (unsigned int i = 0; i < number_of_neighbors; i++) {
            if (p_sphere->mNeighbourElements[i] == NULL) continue;
            noalias(other_to_me_vector) = p_sphere->GetGeometry()[0].Coordinates() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].Coordinates();
            noalias(initial_other_to_me_vector) = p_sphere->GetGeometry()[0].GetInitialPosition() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].GetInitialPosition();
            double initial_distance_XY = sqrt(initial_other_to_me_vector[0] * initial_other_to_me_vector[0] + initial_other_to_me_vector[1] * initial_other_to_me_vector[1]);
            double initial_distance_Z = initial_other_to_me_vector[2];
            // Bonds with degenerate initial geometry cannot contribute a strain ratio.
            if (initial_distance_XY && initial_distance_Z) {
                epsilon_XY = -1 + sqrt(other_to_me_vector[0] * other_to_me_vector[0] + other_to_me_vector[1] * other_to_me_vector[1]) / initial_distance_XY;
                epsilon_Z = -1 + fabs(other_to_me_vector[2] / initial_distance_Z);
            } else continue;
            if (epsilon_Z) { // Should it be added here 'if p_sphere->Id() < p_sphere->mNeighbourElements[i]->Id()'?
                if (((-epsilon_XY / epsilon_Z) > 0.5) || ((-epsilon_XY / epsilon_Z) < 0.0)) continue; // TODO: Check this
                particle_poisson_value -= epsilon_XY / epsilon_Z;
                number_of_neighbors_per_sphere_to_evaluate_poisson++;
            } else continue;
        }
        if (number_of_neighbors_per_sphere_to_evaluate_poisson) {
            particle_poisson_value /= number_of_neighbors_per_sphere_to_evaluate_poisson;
            number_of_spheres_to_evaluate_poisson++;
            total_poisson_value += particle_poisson_value;
        }
    }
    if (number_of_spheres_to_evaluate_poisson) total_poisson_value /= number_of_spheres_to_evaluate_poisson;
    return_data[0] = total_poisson_value;
    return return_data;
} //ComputePoisson
/**
 * Computes a discrete Poisson-ratio estimate for the particle assembly (2D).
 *
 * Per-particle values are written to the nodal variable POISSON_VALUE.
 * Returns the bond-averaged Poisson value in component 0 and the averaged
 * epsilon_Y strain in component 1; component 2 stays zero.
 */
array_1d<double, 3> ComputePoisson2D(ModelPart& rModelPart) { // TODO: Adjust this function to the new changes made in the 3D version
    ElementsArrayType& pElements = rModelPart.GetCommunicator().LocalMesh().Elements();
    double total_poisson_value = 0.0;
    unsigned int number_of_bonds_to_evaluate_poisson = 0;
    array_1d<double, 3> return_data = ZeroVector(3);
    double total_epsilon_y_value = 0.0;
    // TODO: Add OpenMP code
    for (unsigned int k = 0; k < pElements.size(); k++) {
        ElementsArrayType::iterator it = pElements.ptr_begin() + k;
        Element* raw_p_element = &(*it);
        SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element);
        // Skip non-spherical elements instead of dereferencing the null
        // pointer a failed dynamic_cast returns.
        if (p_sphere == NULL) continue;
        double& particle_poisson_value = p_sphere->GetGeometry()[0].FastGetSolutionStepValue(POISSON_VALUE);
        particle_poisson_value = 0.0;
        double epsilon_X = 0.0;
        double epsilon_Y = 0.0;
        unsigned int number_of_neighbors_to_evaluate_poisson = 0;
        array_1d<double, 3> other_to_me_vector;
        array_1d<double, 3> initial_other_to_me_vector;
        double average_sphere_epsilon_y_value = 0.0;
        unsigned int number_of_neighbors = p_sphere->mNeighbourElements.size();
        for (unsigned int i = 0; i < number_of_neighbors; i++)
        {
            if (p_sphere->mNeighbourElements[i] == NULL) continue;
            noalias(other_to_me_vector) = p_sphere->GetGeometry()[0].Coordinates() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].Coordinates();
            noalias(initial_other_to_me_vector) = p_sphere->GetGeometry()[0].GetInitialPosition() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].GetInitialPosition();
            double initial_distance_X = initial_other_to_me_vector[0];
            double initial_distance_Y = initial_other_to_me_vector[1];
            if (initial_distance_X && initial_distance_Y) {
                epsilon_X = -1 + fabs(other_to_me_vector[0] / initial_distance_X);
                epsilon_Y = -1 + fabs(other_to_me_vector[1] / initial_distance_Y);
            }
            if (epsilon_Y) {
                particle_poisson_value -= epsilon_X / epsilon_Y;
                number_of_neighbors_to_evaluate_poisson++;
                total_poisson_value -= epsilon_X / epsilon_Y;
                number_of_bonds_to_evaluate_poisson++;
            }
            average_sphere_epsilon_y_value += epsilon_Y;
        }
        if (number_of_neighbors_to_evaluate_poisson) particle_poisson_value /= number_of_neighbors_to_evaluate_poisson;
        // Guard: a sphere without neighbours would otherwise divide by zero
        // and poison the accumulator with NaN.
        if (number_of_neighbors) total_epsilon_y_value += average_sphere_epsilon_y_value / number_of_neighbors;
    }
    if (number_of_bonds_to_evaluate_poisson) total_poisson_value /= number_of_bonds_to_evaluate_poisson;
    // Guard: an empty local mesh would otherwise divide by zero.
    if (pElements.size()) total_epsilon_y_value /= pElements.size();
    return_data[0] = total_poisson_value;
    return_data[1] = total_epsilon_y_value;
    return return_data;
} //ComputePoisson2D
void ComputeEulerAngles(ModelPart& rSpheresModelPart, ModelPart& rClusterModelPart) {
ProcessInfo& r_process_info = rSpheresModelPart.GetProcessInfo();
bool if_trihedron_option = (bool) r_process_info[TRIHEDRON_OPTION];
typedef ModelPart::NodesContainerType NodesArrayType;
NodesArrayType& pSpheresNodes = rSpheresModelPart.GetCommunicator().LocalMesh().Nodes();
NodesArrayType& pClusterNodes = rClusterModelPart.GetCommunicator().LocalMesh().Nodes();
#pragma omp parallel for
for (int k = 0; k < (int) pSpheresNodes.size(); k++) {
ModelPart::NodeIterator i_iterator = pSpheresNodes.ptr_begin() + k;
Node < 3 > & i = *i_iterator;
array_1d<double, 3 >& rotated_angle = i.FastGetSolutionStepValue(PARTICLE_ROTATION_ANGLE);
if (if_trihedron_option && i.IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
array_1d<double, 3 >& EulerAngles = i.FastGetSolutionStepValue(EULER_ANGLES);
GeometryFunctions::EulerAnglesFromRotationAngle(EulerAngles, rotated_angle);
} // if_trihedron_option && Not BELONGS_TO_A_CLUSTER
}//for Node
#pragma omp parallel for
for (int k = 0; k < (int) pClusterNodes.size(); k++) {
ModelPart::NodeIterator i_iterator = pClusterNodes.ptr_begin() + k;
Node < 3 > & i = *i_iterator;
Quaternion<double>& Orientation = i.FastGetSolutionStepValue(ORIENTATION);
array_1d<double, 3 >& EulerAngles = i.FastGetSolutionStepValue(EULER_ANGLES);
GeometryFunctions::QuaternionToGiDEulerAngles(Orientation, EulerAngles);
}//for Node
} //ComputeEulerAngles
double QuasiStaticAdimensionalNumber(ModelPart& rParticlesModelPart, ModelPart& rContactModelPart, ProcessInfo& r_process_info) {
double adimensional_value = 0.0;
ElementsArrayType& pParticleElements = rParticlesModelPart.GetCommunicator().LocalMesh().Elements();
OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), pParticleElements.size(), this->GetElementPartition());
array_1d<double,3> particle_forces;
const array_1d<double,3>& gravity = r_process_info[GRAVITY];
double total_force = 0.0;
//#pragma omp parallel for
#pragma omp parallel for reduction(+:total_force)
for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++) {
ElementsArrayType::iterator it_begin = pParticleElements.ptr_begin() + this->GetElementPartition()[k];
ElementsArrayType::iterator it_end = pParticleElements.ptr_begin() + this->GetElementPartition()[k + 1];
for (ElementsArrayType::iterator it = it_begin; it != it_end; ++it) {
Element::GeometryType& geom = it->GetGeometry();
if (geom[0].IsNot(DEMFlags::FIXED_VEL_X) && geom[0].IsNot(DEMFlags::FIXED_VEL_Y) && geom[0].IsNot(DEMFlags::FIXED_VEL_Z))
{
particle_forces = geom[0].FastGetSolutionStepValue(TOTAL_FORCES);
double mass = geom[0].FastGetSolutionStepValue(NODAL_MASS);
particle_forces[0] += mass * gravity[0];
particle_forces[1] += mass * gravity[1];
particle_forces[2] += mass * gravity[2];
double module = 0.0;
GeometryFunctions::module(particle_forces, module);
total_force += module;
} //if
}//balls
}//paralel
ElementsArrayType& pContactElements = rContactModelPart.GetCommunicator().LocalMesh().Elements();
OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), pContactElements.size(), this->GetElementPartition());
array_1d<double,3> contact_forces;
double total_elastic_force = 0.0;
#pragma omp parallel for reduction(+:total_elastic_force)
for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++) {
ElementsArrayType::iterator it_begin = pContactElements.ptr_begin() + this->GetElementPartition()[k];
ElementsArrayType::iterator it_end = pContactElements.ptr_begin() + this->GetElementPartition()[k + 1];
for (ElementsArrayType::iterator it = it_begin; it != it_end; ++it){
Element::GeometryType& geom = it->GetGeometry();
if (geom[0].IsNot(DEMFlags::FIXED_VEL_X) && geom[0].IsNot(DEMFlags::FIXED_VEL_Y) && geom[0].IsNot(DEMFlags::FIXED_VEL_Z) &&
geom[1].IsNot(DEMFlags::FIXED_VEL_X) && geom[1].IsNot(DEMFlags::FIXED_VEL_Y) && geom[1].IsNot(DEMFlags::FIXED_VEL_Z)) {
contact_forces = it->GetValue(LOCAL_CONTACT_FORCE);
double module = 0.0;
GeometryFunctions::module(contact_forces, module);
total_elastic_force += module;
}
}
}
if (total_elastic_force != 0.0) {
adimensional_value = total_force/total_elastic_force;
}
else {
KRATOS_ERROR << "There are no elastic forces= " << total_elastic_force << std::endl;
}
return adimensional_value;
}//QuasiStaticAdimensionalNumber
std::vector<unsigned int>& GetElementPartition() {return (mElementPartition);};
protected:
std::vector<unsigned int> mElementPartition;
}; // Class PostUtilities
} // namespace Kratos.
#endif // POST_UTILITIES_H
|
1.norace1.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
int main() {
    /* Values of A are indeterminate, but this benchmark only exercises the
     * race analysis, not the computed data. */
    int A[N][N];
    /* Each parallel iteration owns a distinct row i; the loop-carried
     * dependence A[i][j] <- A[i][j-1] runs only along j, entirely inside a
     * single thread's iteration, so the region is free of data races. */
    #pragma omp parallel for
    for (int i = 1; i < N; i++)
        for (int j = 1; j < N; j++)
            A[i][j] = A[i][j - 1];
}
// CHECK: Region is Data Race Free.
// END
|
pvector.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef PVECTOR_H_
#define PVECTOR_H_
#include <algorithm>
/*
GAP Benchmark Suite
Class: pvector
Author: Scott Beamer
Vector class with ability to not initialize or do initialize in parallel
- std::vector (when resizing) will always initialize, and does it serially
- When pvector is resized, new elements are uninitialized
- Resizing is not thread-safe
*/
template <typename T_>
class pvector {
 public:
  typedef T_* iterator;

  /// Creates an empty vector that owns no storage.
  pvector() : start_(nullptr), end_size_(nullptr), end_capacity_(nullptr) {}

  /// Allocates num_elements slots; contents are intentionally left
  /// uninitialized (see class comment).
  explicit pvector(size_t num_elements) {
    start_ = new T_[num_elements];
    end_size_ = start_ + num_elements;
    end_capacity_ = end_size_;
  }

  /// Allocates num_elements slots and fills them (in parallel) with init_val.
  pvector(size_t num_elements, T_ init_val) : pvector(num_elements) {
    fill(init_val);
  }

  /// Copies the range [copy_begin, copy_end) in parallel.
  pvector(iterator copy_begin, iterator copy_end)
      : pvector(copy_end - copy_begin) {
    #pragma omp parallel for
    for (size_t i=0; i < capacity(); i++)
      start_[i] = copy_begin[i];
  }

  // don't want this to be copied, too much data to move
  pvector(const pvector &other) = delete;

  // prefer move because too much data to copy
  pvector(pvector &&other)
      : start_(other.start_), end_size_(other.end_size_),
        end_capacity_(other.end_capacity_) {
    other.start_ = nullptr;
    other.end_size_ = nullptr;
    other.end_capacity_ = nullptr;
  }

  // want move assignment
  pvector& operator= (pvector &&other) {
    // BUG FIX: the previous version overwrote start_ without releasing the
    // buffer this vector already owned (leak), and would lose the buffer
    // entirely on self-move-assignment.
    if (this != &other) {
      delete[] start_;
      start_ = other.start_;
      end_size_ = other.end_size_;
      end_capacity_ = other.end_capacity_;
      other.start_ = nullptr;
      other.end_size_ = nullptr;
      other.end_capacity_ = nullptr;
    }
    return *this;
  }

  ~pvector() {
    // delete[] of a null pointer is a no-op, so no guard is needed.
    delete[] start_;
  }

  // not thread-safe
  void reserve(size_t num_elements) {
    if (num_elements > capacity()) {
      T_ *new_range = new T_[num_elements];
      #pragma omp parallel for
      for (size_t i=0; i < size(); i++)
        new_range[i] = start_[i];
      // end_size_ must be recomputed before start_ is replaced because
      // size() is derived from the old pointers.
      end_size_ = new_range + size();
      delete[] start_;
      start_ = new_range;
      end_capacity_ = start_ + num_elements;
    }
  }

  bool empty() {
    return end_size_ == start_;
  }

  /// Drops all elements without releasing capacity.
  void clear() {
    end_size_ = start_;
  }

  /// Grows capacity if needed; any new trailing elements are uninitialized.
  void resize(size_t num_elements) {
    reserve(num_elements);
    end_size_ = start_ + num_elements;
  }

  T_& operator[](size_t n) {
    return start_[n];
  }

  const T_& operator[](size_t n) const {
    return start_[n];
  }

  void push_back(T_ val) {
    if (size() == capacity()) {
      size_t new_size = capacity() == 0 ? 1 : capacity() * growth_factor;
      reserve(new_size);
    }
    *end_size_ = val;
    end_size_++;
  }

  /// Overwrites every existing element (in parallel) with init_val.
  void fill(T_ init_val) {
    #pragma omp parallel for
    for (T_* ptr=start_; ptr < end_size_; ptr++)
      *ptr = init_val;
  }

  size_t capacity() const {
    return end_capacity_ - start_;
  }

  size_t size() const {
    return end_size_ - start_;
  }

  iterator begin() const {
    return start_;
  }

  iterator end() const {
    return end_size_;
  }

  T_* data() const {
    return start_;
  }

  void swap(pvector &other) {
    std::swap(start_, other.start_);
    std::swap(end_size_, other.end_size_);
    std::swap(end_capacity_, other.end_capacity_);
  }


 private:
  T_* start_;
  T_* end_size_;
  T_* end_capacity_;
  static const size_t growth_factor = 2;
};
#endif // PVECTOR_H_
|
yescrypt-simd_c.h | /*-
* Copyright 2009 Colin Percival
* Copyright 2012-2014 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
/*
* On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
* gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX
* and XOP are of further help either way.
*/
#ifndef __SSE4_1__
#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "sha256.h"
#include "sysendian.h"
#include "yescrypt.h"
#include "yescrypt-platform_c.h"
#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */
#ifdef __XOP__
#define ARX(out, in1, in2, s) \
out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
{ \
__m128i T = _mm_add_epi32(in1, in2); \
out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
}
#endif
#define SALSA20_2ROUNDS \
/* Operate on "columns" */ \
ARX(X1, X0, X3, 7) \
ARX(X2, X1, X0, 9) \
ARX(X3, X2, X1, 13) \
ARX(X0, X3, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x93); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x39); \
\
/* Operate on "rows" */ \
ARX(X3, X0, X1, 7) \
ARX(X2, X3, X0, 9) \
ARX(X1, X2, X3, 13) \
ARX(X0, X1, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x39); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x93);
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3).
*/
#define SALSA20_8_BASE(maybe_decl, out) \
{ \
maybe_decl Y0 = X0; \
maybe_decl Y1 = X1; \
maybe_decl Y2 = X2; \
maybe_decl Y3 = X3; \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
}
#define SALSA20_8(out) \
SALSA20_8_BASE(__m128i, out)
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
*/
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
X0 = _mm_xor_si128(X0, Z0); \
X1 = _mm_xor_si128(X1, Z1); \
X2 = _mm_xor_si128(X2, Z2); \
X3 = _mm_xor_si128(X3, Z3); \
SALSA20_8_BASE(maybe_decl, out)
#define SALSA20_8_XOR_MEM(in, out) \
SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)
#define SALSA20_8_XOR_REG(out) \
SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)
/* One 64-byte Salsa20 block, accessible either as sixteen 32-bit words
 * (scalar view) or as four 128-bit SSE2 vectors (SIMD view). */
typedef union {
uint32_t w[16];
__m128i q[4];
} salsa20_blk_t;
/**
* blockmix_salsa8(Bin, Bout, r):
* Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
* bytes in length; the output Bout must also be the same size.
*/
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
salsa20_blk_t *restrict Bout, size_t r)
{
__m128i X0, X1, X2, X3;
size_t i;
/* Work with r-1 so the first and last sub-blocks can be peeled out of
 * the main loop below. */
r--;
PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin[i * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
X0 = Bin[r * 2 + 1].q[0];
X1 = Bin[r * 2 + 1].q[1];
X2 = Bin[r * 2 + 1].q[2];
X3 = Bin[r * 2 + 1].q[3];
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
/* Each iteration consumes one odd and one even input sub-block; the
 * shuffled output order of step 6 is produced directly by writing even
 * results to Bout[i] and odd results to Bout[r + 1 + i]. */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
}
/* Peeled final (odd) sub-block. */
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}
/*
* (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
* starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
* destination registers, whereas the shifts would require an extra move
* instruction for our code when building without AVX. Unfortunately, PSHUFD
* is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
* and somewhat slower on some non-Intel CPUs (luckily not including AMD
* Bulldozer and Piledriver). Since for many other CPUs using (V)PSHUFD is a
* win in terms of throughput or/and not needing a move instruction, we
* currently use it despite of the higher latency on some older CPUs. As an
* alternative, the #if below may be patched to only enable use of (V)PSHUFD
* when building with SSE4.1 or newer, which is not available on older CPUs
* where this instruction has higher latency.
*/
#if 1
#define HI32(X) \
_mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
_mm_srli_si128((X), 4)
#else
#define HI32(X) \
_mm_srli_epi64((X), 32)
#endif
#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
* intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif
/* This is tunable */
#define S_BITS 8
/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4
/* Number of S-boxes. Not tunable by design, hard-coded in a few places. */
#define S_N 2
/* Derived values. Not tunable except via S_BITS above. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)
#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1 */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
x = EXTRACT64(X) & S_MASK2; \
s0 = *(const __m128i *)(S0 + (uint32_t)x); \
s1 = *(const __m128i *)(S1 + (x >> 32)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#endif
#define PWXFORM_ROUND \
PWXFORM_SIMD(X0, x0, s00, s01) \
PWXFORM_SIMD(X1, x1, s10, s11) \
PWXFORM_SIMD(X2, x2, s20, s21) \
PWXFORM_SIMD(X3, x3, s30, s31)
#define PWXFORM \
{ \
PWXFORM_X_T x0, x1, x2, x3; \
__m128i s00, s01, s10, s11, s20, s21, s30, s31; \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
}
#define XOR4(in) \
X0 = _mm_xor_si128(X0, (in)[0]); \
X1 = _mm_xor_si128(X1, (in)[1]); \
X2 = _mm_xor_si128(X2, (in)[2]); \
X3 = _mm_xor_si128(X3, (in)[3]);
#define OUT(out) \
(out)[0] = X0; \
(out)[1] = X1; \
(out)[2] = X2; \
(out)[3] = X3;
/**
* blockmix_pwxform(Bin, Bout, r, S):
* Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin). The input Bin must
* be 128r bytes in length; the output Bout must also be the same size.
*/
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
size_t r, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3;
size_t i;
/* Without S-boxes, fall back to classic scrypt's BlockMix_{salsa20/8}. */
if (!S) {
blockmix_salsa8(Bin, Bout, r);
return;
}
/* The two S-boxes occupy the two halves of the S region. */
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
/* r is now the index of the last 64-byte sub-block. */
r--;
PREFETCH(&Bin[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
/* X <-- B_{r1 - 1} */
X0 = Bin[r].q[0];
X1 = Bin[r].q[1];
X2 = Bin[r].q[2];
X3 = Bin[r].q[3];
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
/* X <-- H'(X \xor B_i) */
XOR4(Bin[i].q)
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
/* The final sub-block additionally gets a salsa20/8 pass, which writes
 * Bout[i] itself (SALSA20_8 stores into its out argument). */
XOR4(Bin[i].q)
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
}
#define XOR4_2(in1, in2) \
X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
X3 = _mm_xor_si128((in1)[3], (in2)[3]);
/* As blockmix_salsa8(), but the input is the XOR of two blocks (Bin1, Bin2).
 * Bin2_in_ROM selects non-temporal prefetch hints for Bin2, which is not
 * expected to be revisited soon when it lives in a large ROM.
 * Returns the low 32 bits of the final X0 (used by callers as the next j). */
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, int Bin2_in_ROM)
{
__m128i X0, X1, X2, X3;
size_t i;
r--;
if (Bin2_in_ROM) {
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
/* NOTE(review): this final Bin2 prefetch uses T0 even in the ROM
 * branch (unlike the NTA hints above) — presumably intentional since
 * this sub-block is consumed immediately below; confirm vs upstream. */
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
} else {
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
}
PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[0].q)
SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2 + 1].q)
SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2].q)
SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[r * 2 + 1].q)
SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)
return _mm_cvtsi128_si32(X0);
}
/* pwxform variant of blockmix_salsa8_xor(): mixes Bin1 \xor Bin2 into Bout
 * using the S-boxes in S, or falls back to the salsa8 version when S is NULL.
 * Returns the low 32 bits of the final X0. */
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3;
size_t i;
if (!S)
return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
/* Non-temporal hints for Bin2 when it resides in a large ROM. */
if (Bin2_in_ROM) {
PREFETCH(&Bin2[r], _MM_HINT_NTA)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_NTA)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
} else {
PREFETCH(&Bin2[r], _MM_HINT_T0)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_T0)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0);
/* X <-- B_{r1 - 1} */
XOR4_2(Bin1[r].q, Bin2[r].q)
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
/* X <-- H'(X \xor B_i) */
XOR4(Bin1[i].q)
XOR4(Bin2[i].q)
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin1[i].q)
XOR4(Bin2[i].q)
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
return _mm_cvtsi128_si32(X0);
}
#undef XOR4
#define XOR4(in, out) \
(out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
(out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
(out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
(out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);
/* "Save" variant for YESCRYPT_RW: via the redefined XOR4 macro, each XOR of
 * Bin1 and Bin2 is also written back into Bin2 (V_j <-- Xprev \xor V_j), so
 * Bin2 is non-const here.  Returns the low 32 bits of the final X0. */
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r)
{
__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
size_t i;
r--;
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[0].q, Bin2[0].q)
SALSA20_8_XOR_REG(Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
SALSA20_8_XOR_REG(Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
SALSA20_8_XOR_REG(Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)
return _mm_cvtsi128_si32(X0);
}
#define XOR4_Y \
X0 = _mm_xor_si128(X0, Y0); \
X1 = _mm_xor_si128(X1, Y1); \
X2 = _mm_xor_si128(X2, Y2); \
X3 = _mm_xor_si128(X3, Y3);
/* pwxform "save" variant: like blockmix_xor() but also writes each XORed
 * sub-block back into Bin2 (via the redefined XOR4), as required by the
 * YESCRYPT_RW read-write mode.  Falls back to the salsa8 save variant when
 * S is NULL.  Returns the low 32 bits of the final X0. */
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
size_t i;
if (!S)
return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
PREFETCH(&Bin2[r], _MM_HINT_T0)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_T0)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0);
/* X <-- B_{r1 - 1} */
XOR4_2(Bin1[r].q, Bin2[r].q)
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
/* Bin2[i] <-- Bin1[i] \xor Bin2[i], result also kept in Y0..Y3 */
XOR4(Bin1[i].q, Bin2[i].q)
/* X <-- H'(X \xor B_i) */
XOR4_Y
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin1[i].q, Bin2[i].q)
XOR4_Y
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
return _mm_cvtsi128_si32(X0);
}
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_SIMD_1
#undef PWXFORM_SIMD_2
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y
/**
* integerify(B, r):
* Return the result of parsing B_{2r-1} as a little-endian integer.
*/
/* Return the first 32-bit word of the last 64-byte sub-block of B
 * (B holds 2r sub-blocks). */
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
const salsa20_blk_t * last = &B[2 * r - 1];
return last->w[0];
}
/**
* smix1(B, r, N, flags, V, NROM, shared, XY, S):
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 128r bytes in length. The value N must be even and no
* smaller than 2. The array V must be aligned to a multiple of 64 bytes, and
* arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
* bytes as well saves cache lines, but might result in cache bank conflicts).
*/
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
salsa20_blk_t * XY, void * S)
{
const salsa20_blk_t * VROM = shared->shared1.aligned;
uint32_t VROM_mask = shared->mask1;
/* s = number of 64-byte sub-blocks per 128r-byte big block */
size_t s = 2 * r;
salsa20_blk_t * X = V, * Y;
uint32_t i, j;
size_t k;
/* 1: X <-- B */
/* 3: V_i <-- X */
/* Decode B into V[0] with the (i * 5 % 16) word shuffle used throughout
 * this implementation's in-memory block layout. */
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
}
}
/* Three variants below: with ROM, read-write (YESCRYPT_RW), and classic
 * scrypt-style sequential fill. */
if (NROM && (VROM_mask & 1)) {
uint32_t n;
salsa20_blk_t * V_n;
const salsa20_blk_t * V_j;
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[s];
blockmix(X, Y, r, S);
X = &V[2 * s];
if ((1 & VROM_mask) == 1) {
/* j <-- Integerify(X) mod NROM */
j = integerify(Y, r) & (NROM - 1);
V_j = &VROM[j * s];
/* X <-- H(X \xor VROM_j) */
j = blockmix_xor(Y, V_j, X, r, 1, S);
} else {
/* X <-- H(X) */
blockmix(Y, X, r, S);
j = integerify(X, r);
}
/* Fill V two blocks per iteration, doubling the wrap window n. */
for (n = 2; n < N; n <<= 1) {
uint32_t m = (n < N / 2) ? n : (N - 1 - n);
V_n = &V[n * s];
/* 2: for i = 0 to N - 1 do */
for (i = 1; i < m; i += 2) {
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i - 1;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V_n[i * s];
j = blockmix_xor(X, V_j, Y, r, 0, S);
if (((n + i) & VROM_mask) == 1) {
/* j <-- Integerify(X) mod NROM */
j &= NROM - 1;
V_j = &VROM[j * s];
} else {
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i;
V_j = &V[j * s];
}
/* X <-- H(X \xor VROM_j) */
X = &V_n[(i + 1) * s];
j = blockmix_xor(Y, V_j, X, r, 1, S);
}
}
/* The loop above leaves n == p2ceil(N); halve it back for wrapping. */
n >>= 1;
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 2 - n;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[(N - 1) * s];
j = blockmix_xor(X, V_j, Y, r, 0, S);
if (((N - 1) & VROM_mask) == 1) {
/* j <-- Integerify(X) mod NROM */
j &= NROM - 1;
V_j = &VROM[j * s];
} else {
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 1 - n;
V_j = &V[j * s];
}
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
X = XY;
blockmix_xor(Y, V_j, X, r, 1, S);
} else if (flags & YESCRYPT_RW) {
uint32_t n;
salsa20_blk_t * V_n, * V_j;
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[s];
blockmix(X, Y, r, S);
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
X = &V[2 * s];
blockmix(Y, X, r, S);
j = integerify(X, r);
for (n = 2; n < N; n <<= 1) {
uint32_t m = (n < N / 2) ? n : (N - 1 - n);
V_n = &V[n * s];
/* 2: for i = 0 to N - 1 do */
for (i = 1; i < m; i += 2) {
Y = &V_n[i * s];
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i - 1;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
j = blockmix_xor(X, V_j, Y, r, 0, S);
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
X = &V_n[(i + 1) * s];
j = blockmix_xor(Y, V_j, X, r, 0, S);
}
}
n >>= 1;
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 2 - n;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[(N - 1) * s];
j = blockmix_xor(X, V_j, Y, r, 0, S);
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 1 - n;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
X = XY;
blockmix_xor(Y, V_j, X, r, 0, S);
} else {
/* Classic scrypt: sequential fill, two blocks per iteration. */
/* 2: for i = 0 to N - 1 do */
for (i = 1; i < N - 1; i += 2) {
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[i * s];
blockmix(X, Y, r, S);
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
X = &V[(i + 1) * s];
blockmix(Y, X, r, S);
}
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[i * s];
blockmix(X, Y, r, S);
/* 4: X <-- H(X) */
X = XY;
blockmix(Y, X, r, S);
}
/* B' <-- X */
/* Re-encode the final block back into B with the same word shuffle. */
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
}
}
}
/**
* smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 256r bytes in length. The value N must be a power of 2
* greater than 1. The value Nloop must be even. The array V must be aligned
* to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
* bytes (aligning them to 64 bytes as well saves cache lines, but might result
* in cache bank conflicts).
*/
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
const salsa20_blk_t * VROM = shared->shared1.aligned;
uint32_t VROM_mask = shared->mask1;
/* s = number of 64-byte sub-blocks per 128r-byte big block */
size_t s = 2 * r;
/* X and Y ping-pong within the 256r-byte XY scratch area. */
salsa20_blk_t * X = XY, * Y = &XY[s];
uint64_t i;
uint32_t j;
size_t k;
if (Nloop == 0)
return;
/* X <-- B' */
/* 3: V_i <-- X */
/* Decode B with the (i * 5 % 16) word shuffle (see smix1). */
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
}
}
/* Each loop body below does two blockmix calls, hence Nloop / 2. */
i = Nloop / 2;
/* 7: j <-- Integerify(X) mod N */
j = integerify(X, r) & (N - 1);
/*
 * Normally, NROM implies YESCRYPT_RW, but we check for these separately
 * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
 * operating on the entire V.
 */
if (NROM && (flags & YESCRYPT_RW)) {
/* 6: for i = 0 to N - 1 do */
for (i = 0; i < Nloop; i += 2) {
salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* j <-- Integerify(X) mod NROM */
j = blockmix_xor_save(X, V_j, Y, r, S);
/* Every VROM_mask-th iteration reads from the ROM instead. */
if (((i + 1) & VROM_mask) == 1) {
const salsa20_blk_t * VROM_j;
j &= NROM - 1;
VROM_j = &VROM[j * s];
/* X <-- H(X \xor VROM_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(Y, VROM_j, X, r, 1, S);
} else {
j &= N - 1;
V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* j <-- Integerify(X) mod NROM */
j = blockmix_xor_save(Y, V_j, X, r, S);
}
j &= N - 1;
V_j = &V[j * s];
}
} else if (NROM) {
/* 6: for i = 0 to N - 1 do */
for (i = 0; i < Nloop; i += 2) {
const salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* j <-- Integerify(X) mod NROM */
j = blockmix_xor(X, V_j, Y, r, 0, S);
if (((i + 1) & VROM_mask) == 1) {
j &= NROM - 1;
V_j = &VROM[j * s];
} else {
j &= N - 1;
V_j = &V[j * s];
}
/* X <-- H(X \xor VROM_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(Y, V_j, X, r, 1, S);
j &= N - 1;
V_j = &V[j * s];
}
} else if (flags & YESCRYPT_RW) {
/* 6: for i = 0 to N - 1 do */
/* i was preset to Nloop / 2 above and counts down here. */
do {
salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor_save(X, V_j, Y, r, S);
j &= N - 1;
V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor_save(Y, V_j, X, r, S);
j &= N - 1;
} while (--i);
} else {
/* 6: for i = 0 to N - 1 do */
do {
const salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(X, V_j, Y, r, 0, S);
j &= N - 1;
V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(Y, V_j, X, r, 0, S);
j &= N - 1;
} while (--i);
}
/* 10: B' <-- X */
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
}
}
}
/**
* p2floor(x):
* Largest power of 2 not greater than argument.
*/
static uint64_t
p2floor(uint64_t x)
{
/* Repeatedly clear the lowest set bit; once a single bit remains, that
 * is the largest power of 2 not greater than x.  Returns 0 for x == 0. */
for (;;) {
uint64_t cleared = x & (x - 1);
if (cleared == 0)
return x;
x = cleared;
}
}
/**
* smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
* temporary storage V must be 128rN bytes in length; the temporary storage XY
* must be 256r or 256rp bytes in length (the larger size is required with
* OpenMP-enabled builds). The value N must be a power of 2 greater than 1.
* The array V must be aligned to a multiple of 64 bytes, and arrays B and
* XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
* saves cache lines and helps avoid false sharing in OpenMP-enabled builds
* when p > 1, but it might also result in cache bank conflicts).
*/
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
yescrypt_flags_t flags,
salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
salsa20_blk_t * XY, void * S)
{
size_t s = 2 * r;
/* Each of the p lanes fills its own contiguous chunk of V. */
uint32_t Nchunk = N / p;
uint64_t Nloop_all, Nloop_rw;
uint32_t i;
/* Scale the smix2 iteration count by the time-cost parameter t; the
 * fractions below keep total cost comparable across modes. */
Nloop_all = Nchunk;
if (flags & YESCRYPT_RW) {
if (t <= 1) {
if (t)
Nloop_all *= 2; /* 2/3 */
Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
} else {
Nloop_all *= t - 1;
}
} else if (t) {
if (t == 1)
Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
Nloop_all *= t;
}
/* Nloop_rw is the read-write portion done per-lane on the lane's own
 * chunk; the remainder runs read-only over the whole V. */
Nloop_rw = 0;
if (flags & __YESCRYPT_INIT_SHARED)
Nloop_rw = Nloop_all;
else if (flags & YESCRYPT_RW)
Nloop_rw = Nloop_all / p;
Nchunk &= ~(uint32_t)1; /* round down to even */
Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
Nloop_rw &= ~(uint64_t)1; /* round down to even */
#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
{
#pragma omp for
#endif
for (i = 0; i < p; i++) {
uint32_t Vchunk = i * Nchunk;
uint8_t * Bp = &B[128 * r * i];
salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t * XYp = XY;
#endif
/* The last lane absorbs any remainder from the even rounding. */
uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
/* Initialize this lane's S-boxes by running a small pwxform-less
 * smix1 over the S region itself. */
if (Sp)
smix1(Bp, 1, S_SIZE_ALL / 128,
flags & ~YESCRYPT_PWXFORM,
Sp, NROM, shared, XYp, NULL);
if (!(flags & __YESCRYPT_INIT_SHARED_2))
smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
NROM, shared, XYp, Sp);
}
/* Remaining read-only iterations range over the entire V array. */
if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
for (i = 0; i < p; i++) {
uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t * XYp = XY;
#endif
void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
smix2(Bp, r, N, Nloop_all - Nloop_rw,
flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
}
}
#ifdef _OPENMP
}
#endif
}
/**
* yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
* N, r, p, t, flags, buf, buflen):
* Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
* p, buflen), or a revision of scrypt as requested by flags and shared, and
* write the result into buf. The parameters r, p, and buflen must satisfy
* r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power
* of 2 greater than 1. (This optimized implementation currently additionally
* limits N to the range from 8 to 2^31, but other implementation might not.)
*
* t controls computation time while not affecting peak memory usage. shared
* and flags may request special modes as described in yescrypt.h. local is
* the thread-local data structure, allowing to preserve and reuse a memory
* allocation across calls, thereby reducing its overhead.
*
* Return 0 on success; or -1 on error.
*/
static int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
const uint8_t * passwd, size_t passwdlen,
const uint8_t * salt, size_t saltlen,
uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
uint8_t * buf, size_t buflen)
{
yescrypt_region_t tmp;
uint64_t NROM;
size_t B_size, V_size, XY_size, need;
uint8_t * B, * S;
salsa20_blk_t * V, * XY;
uint8_t sha256[32];
/*
 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
 * so don't let it have side-effects. Without this adjustment, it'd
 * enable the SHA-256 password pre-hashing and output post-hashing,
 * because any deviation from classic scrypt implies those.
 */
if (p == 1)
flags &= ~YESCRYPT_PARALLEL_SMIX;
/* Sanity-check parameters */
if (flags & ~YESCRYPT_KNOWN_FLAGS) {
errno = EINVAL;
return -1;
}
#if SIZE_MAX > UINT32_MAX
if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
errno = EFBIG;
return -1;
}
#endif
if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
errno = EFBIG;
return -1;
}
if (N > UINT32_MAX) {
errno = EFBIG;
return -1;
}
/* N must be a power of 2, at least 8 in this implementation. */
if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
errno = EINVAL;
return -1;
}
if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
errno = EINVAL;
return -1;
}
/* Guard the size computations below against overflow. */
if ((r > SIZE_MAX / 256 / p) ||
(N > SIZE_MAX / 128 / r)) {
errno = ENOMEM;
return -1;
}
#ifdef _OPENMP
if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
(N > SIZE_MAX / 128 / (r * p))) {
errno = ENOMEM;
return -1;
}
#endif
if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
(flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
p > SIZE_MAX / S_SIZE_ALL) {
errno = ENOMEM;
return -1;
}
/* Derive NROM from the shared (ROM) region size, if one is attached. */
NROM = 0;
if (shared->shared1.aligned) {
NROM = shared->shared1.aligned_size / ((size_t)128 * r);
if (NROM > UINT32_MAX) {
errno = EFBIG;
return -1;
}
if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
!(flags & YESCRYPT_RW)) {
errno = EINVAL;
return -1;
}
}
/* Allocate memory.  Layout within one region: B, then V, then XY,
 * then (optionally) the pwxform S-boxes. */
V = NULL;
V_size = (size_t)128 * r * N;
#ifdef _OPENMP
if (!(flags & YESCRYPT_PARALLEL_SMIX))
V_size *= p;
#endif
need = V_size;
if (flags & __YESCRYPT_INIT_SHARED) {
/* When initializing a shared ROM, V is the caller's local region
 * and must not have been allocated yet. */
if (local->aligned_size < need) {
if (local->base || local->aligned ||
local->base_size || local->aligned_size) {
errno = EINVAL;
return -1;
}
if (!alloc_region(local, need))
return -1;
}
V = (salsa20_blk_t *)local->aligned;
need = 0;
}
B_size = (size_t)128 * r * p;
need += B_size;
if (need < B_size) {
errno = ENOMEM;
return -1;
}
XY_size = (size_t)256 * r;
#ifdef _OPENMP
XY_size *= p;
#endif
need += XY_size;
if (need < XY_size) {
errno = ENOMEM;
return -1;
}
if (flags & YESCRYPT_PWXFORM) {
size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
S_size *= p;
#else
if (flags & YESCRYPT_PARALLEL_SMIX)
S_size *= p;
#endif
need += S_size;
if (need < S_size) {
errno = ENOMEM;
return -1;
}
}
if (flags & __YESCRYPT_INIT_SHARED) {
if (!alloc_region(&tmp, need))
return -1;
B = (uint8_t *)tmp.aligned;
XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
} else {
init_region(&tmp);
/* Reuse the caller's local region across calls; grow if needed. */
if (local->aligned_size < need) {
if (free_region(local))
return -1;
if (!alloc_region(local, need))
return -1;
}
B = (uint8_t *)local->aligned;
V = (salsa20_blk_t *)((uint8_t *)B + B_size);
XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
}
S = NULL;
if (flags & YESCRYPT_PWXFORM)
S = (uint8_t *)XY + XY_size;
/* Any deviation from classic scrypt pre-hashes the password. */
if (t || flags) {
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, passwd, passwdlen);
SHA256_Final(sha256, &ctx);
passwd = sha256;
passwdlen = sizeof(sha256);
}
/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);
/* In non-classic modes, the new "password" for the final PBKDF2 is the
 * first 32 bytes of B (passwd already points at sha256). */
if (t || flags)
memcpy(sha256, B, sizeof(sha256));
if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
} else {
uint32_t i;
/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
for (i = 0; i < p; i++) {
/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
&V[(size_t)2 * r * i * N],
NROM, shared,
&XY[(size_t)4 * r * i],
S ? &S[S_SIZE_ALL * i] : S);
#else
smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
NROM, shared, XY, S);
#endif
}
}
/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);
/*
 * Except when computing classic scrypt, allow all computation so far
 * to be performed on the client. The final steps below match those of
 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
 */
if ((t || flags) && buflen == sizeof(sha256)) {
/* Compute ClientKey */
/* NOTE(review): upstream yescrypt uses the HMAC label "Client Key"
 * here; "PPTPPubKey" looks like a deliberate fork constant —
 * changing it would change all produced hashes, so confirm before
 * touching. */
{
HMAC_SHA256_CTX ctx;
HMAC_SHA256_Init(&ctx, buf, buflen);
HMAC_SHA256_Update(&ctx, "PPTPPubKey", 10);
HMAC_SHA256_Final(sha256, &ctx);
}
/* Compute StoredKey */
{
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, sha256, sizeof(sha256));
SHA256_Final(buf, &ctx);
}
}
if (free_region(&tmp))
return -1;
/* Success! */
return 0;
}
|
info.c | // RUN: %libomptarget-compile-nvptx64-nvidia-cuda \
// RUN: -gline-tables-only -fopenmp-extensions
// RUN: env LIBOMPTARGET_INFO=63 %libomptarget-run-nvptx64-nvidia-cuda 2>&1 | \
// RUN: %fcheck-nvptx64-nvidia-cuda -allow-empty -check-prefix=INFO
// REQUIRES: nvptx64-nvidia-cuda
#include <stdio.h>
#include <omp.h>
#define N 64
#pragma omp declare target
int global;
#pragma omp end declare target
extern void __tgt_set_info_flag(unsigned);
// Lit regression test for LIBOMPTARGET_INFO output: the INFO lines below are
// FileCheck directives (see the RUN lines at the top of the file) and must
// match the runtime's mapping/kernel-launch messages in order.
int main() {
  int A[N];
  int B[N];
  int C[N];
  int val = 1;
// INFO: CUDA device 0 info: Device supports up to {{[0-9]+}} CUDA blocks and {{[0-9]+}} threads with a warp size of {{[0-9]+}}
// INFO: Libomptarget device 0 info: Entering OpenMP data region at info.c:{{[0-9]+}}:{{[0-9]+}} with 3 arguments:
// INFO: Libomptarget device 0 info: alloc(A[0:64])[256]
// INFO: Libomptarget device 0 info: tofrom(B[0:64])[256]
// INFO: Libomptarget device 0 info: to(C[0:64])[256]
// INFO: Libomptarget device 0 info: Creating new map entry with HstPtrBase={{.*}}, HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, DynRefCount=1, HoldRefCount=0, Name=A[0:64]
// INFO: Libomptarget device 0 info: Creating new map entry with HstPtrBase={{.*}}, HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, DynRefCount=0, HoldRefCount=1, Name=B[0:64]
// INFO: Libomptarget device 0 info: Copying data from host to device, HstPtr={{.*}}, TgtPtr={{.*}}, Size=256, Name=B[0:64]
// INFO: Libomptarget device 0 info: Creating new map entry with HstPtrBase={{.*}}, HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, DynRefCount=1, HoldRefCount=0, Name=C[0:64]
// INFO: Libomptarget device 0 info: Copying data from host to device, HstPtr={{.*}}, TgtPtr={{.*}}, Size=256, Name=C[0:64]
// INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:{{[0-9]+}}:{{[0-9]+}}:
// INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) DynRefCount HoldRefCount Declaration
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 0 C[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 0 1 B[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 0 A[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: Entering OpenMP kernel at info.c:{{[0-9]+}}:{{[0-9]+}} with 1 arguments:
// INFO: Libomptarget device 0 info: firstprivate(val)[4]
// INFO: CUDA device 0 info: Launching kernel __omp_offloading_{{.*}}main{{.*}} with {{[0-9]+}} blocks and {{[0-9]+}} threads in Generic mode
// INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:{{[0-9]+}}:{{[0-9]+}}:
// INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) DynRefCount HoldRefCount Declaration
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 0 C[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 0 1 B[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 0 A[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: Exiting OpenMP data region at info.c:{{[0-9]+}}:{{[0-9]+}} with 3 arguments:
// INFO: Libomptarget device 0 info: alloc(A[0:64])[256]
// INFO: Libomptarget device 0 info: tofrom(B[0:64])[256]
// INFO: Libomptarget device 0 info: to(C[0:64])[256]
// INFO: Libomptarget device 0 info: Copying data from device to host, TgtPtr={{.*}}, HstPtr={{.*}}, Size=256, Name=B[0:64]
// INFO: Libomptarget device 0 info: Removing map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=C[0:64]
// INFO: Libomptarget device 0 info: Removing map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=B[0:64]
// INFO: Libomptarget device 0 info: Removing map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=A[0:64]
// INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:[[#%u,]]:[[#%u,]]:
// INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) DynRefCount HoldRefCount Declaration
// INFO: Libomptarget device 0 info: [[#%#x,]] [[#%#x,]] 4 INF 0 global at unknown:0:0
// A data region with three distinct map types plus a kernel taking one
// firstprivate argument, producing the mapping-table dumps checked above.
#pragma omp target data map(alloc:A[0:N]) map(ompx_hold,tofrom:B[0:N]) map(to:C[0:N])
#pragma omp target firstprivate(val)
  { val = 1; }
  // Disable info output; the INFO-NOT below verifies nothing more is printed.
  __tgt_set_info_flag(0x0);
// INFO-NOT: Libomptarget device 0 info: {{.*}}
#pragma omp target
  { }
  return 0;
}
|
toimg.c | /* Copyright 2013-2018 The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013, 2015 Martin Uecker <uecker@eecs.berkeley.edu>
* 2015, 2018 Jon Tamir <jtamir@eecs.berkeley.edu>
*/
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <strings.h>
#include <complex.h>
#include <stdbool.h>
#include <math.h>
#include "num/multind.h"
#include "num/init.h"
#include "num/flpmath.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "misc/mmio.h"
#include "misc/png.h"
#include "misc/dicom.h"
#include "misc/opts.h"
#ifndef DIMS
#define DIMS 16
#endif
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif
static const char help_str[] = "Create magnitude images as png or proto-dicom.\n"
"The first two non-singleton dimensions will\n"
"be used for the image, and the other dimensions\n"
"will be looped over.";
// from view:src/draw.c
// Restrict x to the closed interval [a, b].
static double clamp(double a, double b, double x)
{
	if (x < a)
		return a;

	if (x > b)
		return b;

	return x;
}
static double windowing(double g, double a, double b, double x)
{
return pow(clamp(0., 1., (x - a) / (b - a)), g);
}
/* Write one magnitude image to disk.
 *
 * dicom:         write 16-bit little-endian proto-dicom; otherwise 8-bit RGB png
 * use_windowing: apply the gamma/contrast/window mapping instead of linear scaling
 * name:          output file name; inum is the image index passed to the writer
 * scale:         each pixel magnitude is divided by this before quantization
 * data:          h-by-w complex image, stored column-major (element (i,j) at data[j*h+i])
 */
static void toimg(bool dicom, bool use_windowing, const char* name, long inum, float gamma, float contrast, float window, float scale, long h, long w, const complex float* data)
{
	int len = strlen(name);
	assert(len >= 1);

	// 2 bytes/pixel for 16-bit grayscale dicom, 3 for 24-bit RGB png
	int nr_bytes = dicom ? 2 : 3;
	unsigned char (*buf)[h][w][nr_bytes] = TYPE_ALLOC(unsigned char[h][w][nr_bytes]);

	float max_val = dicom ? 65535. : 255.;

	for (int i = 0; i < h; i++) {
		for (int j = 0; j < w; j++) {

			double val = cabsf(data[j * h + i]) / scale;
			// NOTE(review): without windowing, val > 1 makes value exceed
			// max_val and wrap when stored below — confirm callers always
			// pass a scale >= the data maximum in that mode.
			unsigned int value = (unsigned int)(max_val * (use_windowing ? windowing(gamma, contrast, window, val) : val));

			if (!dicom) {
				// replicate the gray value into R, G, B
				(*buf)[i][j][0] = value;
				(*buf)[i][j][1] = value;
				(*buf)[i][j][2] = value;
			} else {
				// 16-bit sample, little-endian byte order
				(*buf)[i][j][0] = (value >> 0) & 0xFF;
				(*buf)[i][j][1] = (value >> 8) & 0xFF;
			}
		}
	}

	// both writers share the signature (name, w, h, inum, bytes)
	(dicom ? dicom_write : png_write_rgb24)(name, w, h, inum, &(*buf)[0][0][0]);
	free(buf);
}
/* Write a stack of magnitude images, one file per slice.
 *
 * The array is squeezed (singleton dimensions removed); the first two
 * remaining dimensions form the image plane and all others are looped over.
 * single_scale: normalize all images by the global maximum; otherwise each
 * image is scaled by its own maximum (or by its mean when use_windowing).
 */
static void toimg_stack(const char* name, bool dicom, bool single_scale, bool use_windowing, float gamma, float contrast, float window, const long dims[DIMS], const complex float* data)
{
	long data_size = md_calc_size(DIMS, dims);

	long sq_dims[DIMS] = { [0 ... DIMS - 1] = 1 };

	int l = 0;

	for (int i = 0; i < DIMS; i++)
		if (1 != dims[i])
			sq_dims[l++] = dims[i];

	// global maximum magnitude, used when single_scale is set
	float max = 0.;
	for (long i = 0; i < data_size; i++)
		max = MAX(cabsf(data[i]), max);

	int len = strlen(name);
	assert(len >= 1);

	long num_imgs = md_calc_size(DIMS - 2, sq_dims + 2);
	long img_size = md_calc_size(2, sq_dims);

	// fix: num_imgs is long, so use %ld (was %d — undefined behavior)
	debug_printf(DP_INFO, "Writing %ld image(s)...", num_imgs);

#pragma omp parallel for
	for (long i = 0; i < num_imgs; i++) {

		// fix: size the name buffer generously and use snprintf so an
		// index wider than 4 digits cannot overflow it (was len + 10)
		char name_i[len + 32];

		if (num_imgs > 1)
			snprintf(name_i, sizeof name_i, "%s-%04ld.%s", name, i, dicom ? "dcm" : "png");
		else
			snprintf(name_i, sizeof name_i, "%s.%s", name, dicom ? "dcm" : "png");

		float scale = 0.;

		if (use_windowing)
			scale = md_znorm(2, sq_dims, data + i * img_size) / md_calc_size(2, sq_dims);
		else if (single_scale)
			scale = max;
		else
			for (long j = 0; j < md_calc_size(2, sq_dims); j++)
				scale = MAX(cabsf(data[i * img_size + j]), scale);

		// avoid dividing by zero on all-zero images
		if (0. == scale)
			scale = 1.;

		toimg(dicom, use_windowing, name_i, i, gamma, contrast, window, scale, sq_dims[0], sq_dims[1], data + i * img_size);
	}

	// fix: removed stray num_imgs argument that had no conversion specifier
	debug_printf(DP_INFO, "done.\n");
}
// Command-line entry point: parse options, infer the output format from the
// output prefix's extension (.png or .dcm), load the input and write images.
int main_toimg(int argc, char* argv[argc])
{
	const char* in_file = NULL;
	const char* out_prefix = NULL;

	struct arg_s args[] = {
		ARG_INFILE(true, &in_file, "input"),
		ARG_OUTFILE(true, &out_prefix, "output prefix"),
	};

	bool use_windowing = false;
	bool single_scale = true;
	bool dicom = false;

	float gamma = 1.;
	float contrast = 0.;
	float window = 750.;

	const struct opt_s opts[] = {
		OPT_FLOAT('g', &gamma, "gamma", "gamma level"),
		OPT_FLOAT('c', &contrast, "contrast", "contrast level"),
		OPT_FLOAT('w', &window, "window", "window level"),
		OPT_SET('d', &dicom, "write to dicom format (deprecated, use extension .dcm)"),
		OPT_CLEAR('m', &single_scale, "re-scale each image"),
		OPT_SET('W', &use_windowing, "use dynamic windowing"),
	};

	cmdline(&argc, argv, ARRAY_SIZE(args), args, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	// An explicit extension selects the format and is stripped from the prefix.
	char* dot = strrchr(out_prefix, '.');

	if (NULL != dot) {

		assert(!dicom);	// -d conflicts with an explicit extension

		if (0 == strcmp(dot, ".dcm"))
			dicom = true;
		else if (0 != strcmp(dot, ".png"))
			error("Unknown file extension.");

		*dot = '\0';
	}

	long dims[DIMS];
	complex float* data = load_cfl(in_file, DIMS, dims);

	toimg_stack(out_prefix, dicom, single_scale, use_windowing, gamma, contrast, window, dims, data);

	unmap_cfl(DIMS, dims, data);

	return 0;
}
|
detector.c | #include "darknet.h"
#include <locale.h>
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
/* Train a detection network, optionally across multiple GPUs.
 *
 * datacfg    data config listing "train" image list and "backup" directory
 * cfgfile    network architecture config
 * weightfile optional initial weights (NULL trains from scratch)
 * gpus/ngpus device ids and count; one network replica is loaded per GPU
 * clear      nonzero resets the network's seen-images counter
 */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    network **nets = (network**)calloc(ngpus, sizeof(network*));
    // reseed with the same value before each load so all replicas start
    // from identical random initialization
    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        // scale the rate since each step consumes ngpus batches
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];
    int imgs = net->batch * net->subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    data train, buffer;
    layer l = net->layers[net->n - 1];
    int classes = l.classes;
    float jitter = l.jitter;
    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);
    // data-loader arguments; the loader fills `buffer` asynchronously
    load_args args = get_base_args(net);
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    //args.type = INSTANCE_DATA;
    args.threads = 64;
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
    pthread_t load_thread = load_data(args);
#endif
    double time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        // multi-scale training: every 10 batches pick a new square input
        // size in [320, 608] and resize every replica
        if(l.random && count++%10 == 0){
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            // lock to the largest size near the end of training
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
            pthread_join(load_thread, 0);
#endif
            // discard the batch that was loaded at the old size and
            // restart the loader with the new dimensions
            train = buffer;
            free_data(train);
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
            load_thread = load_data(args);
#endif
#pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
        time=what_time_is_it_now();
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
        pthread_join(load_thread, 0);
#endif
        // take the prefetched batch and immediately start loading the next
        train = buffer;
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
        load_thread = load_data(args);
#endif
        /*
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[10] + 1 + k*5);
           if(!b.x) break;
           printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
           }
         */
        /*
           int zz;
           for(zz = 0; zz < train.X.cols; ++zz){
           image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[zz] + k*5, 1);
           printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
           draw_bbox(im, b, 1, 1,0,0);
           }
           show_image(im, "truth11");
           cvWaitKey(0);
           save_image(im, "truth11");
           }
         */
        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
        time=what_time_is_it_now();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        // exponential moving average of the loss for smoother logging
        avg_loss = avg_loss*.9 + loss*.1;
        i = get_current_batch(net);
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
        // periodic checkpoint, overwritten each time
        if(i%100==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        // numbered snapshots: every 100 batches early on, then every 10000
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}
/* Extract the numeric COCO image id from a file path.
 * Prefers the text after the last '_' (COCO naming such as
 * "COCO_val2014_000000000139.jpg"), falling back to the text after the
 * last '/'. If neither separator is present, parse the whole name
 * (fix: previously atoi(NULL + 1) — undefined behavior). */
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    if(!p) return atoi(filename);
    return atoi(p+1);
}
/* Append COCO-format JSON result lines to fp: one object per detected
 * class probability, with the box clipped to the image and converted to
 * [x, y, width, height]. */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int image_id = get_coco_image_id(image_path);
    int i, j;
    for(i = 0; i < num_boxes; ++i){
        box b = dets[i].bbox;

        float xmin = b.x - b.w/2.;
        float ymin = b.y - b.h/2.;
        float xmax = b.x + b.w/2.;
        float ymax = b.y + b.h/2.;

        /* clip to the image rectangle */
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        float bw = xmax - xmin;
        float bh = ymax - ymin;

        for(j = 0; j < classes; ++j){
            float p = dets[i].prob[j];
            if (p) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], xmin, ymin, bw, bh, p);
        }
    }
}
/* Write Pascal-VOC-style results: one file per class (fps[j]), each line
 * "id score xmin ymin xmax ymax" in 1-based pixel coordinates. */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        box b = dets[i].bbox;

        /* center/size -> 1-based corner coordinates */
        float xmin = b.x - b.w/2. + 1;
        float ymin = b.y - b.h/2. + 1;
        float xmax = b.x + b.w/2. + 1;
        float ymax = b.y + b.h/2. + 1;

        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            float p = dets[i].prob[j];
            if (p) fprintf(fps[j], "%s %f %f %f %f %f\n", id, p,
                    xmin, ymin, xmax, ymax);
        }
    }
}
/* Write ImageNet-style results to fp: one line per detected class,
 * "image_id class_id score xmin ymin xmax ymax" with 1-based class ids. */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        box b = dets[i].bbox;

        float xmin = b.x - b.w/2.;
        float ymin = b.y - b.h/2.;
        float xmax = b.x + b.w/2.;
        float ymax = b.y + b.h/2.;

        /* clip to the image rectangle */
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            float p = dets[i].prob[j];
            if (p) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, p,
                    xmin, ymin, xmax, ymax);
        }
    }
}
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
#if defined __linux__ || defined __APPLE__
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
#else
_snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
#endif
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
#if defined __linux__ || defined __APPLE__
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
#else
_snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
#endif
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = (FILE**)calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
#if defined __linux__ || defined __APPLE__
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
#else
_snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
#endif
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = (image*)calloc(nthreads, sizeof(image));
image *val_resized = (image*)calloc(nthreads, sizeof(image));
image *buf = (image*)calloc(nthreads, sizeof(image));
image *buf_resized = (image*)calloc(nthreads, sizeof(image));
pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t));
image input = make_image(net->w, net->h, net->c*2);
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
flip_image(val_resized[t]);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
network_predict(net, input.data);
int w = val[t].w;
int h = val[t].h;
int num = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
if (nms) do_nms_sort(dets, num, classes, nms);
if (coco){
print_cocos(fp, path, dets, num, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
} else {
print_detector_detections(fps, id, dets, num, classes, w, h);
}
free_detections(dets, num);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
/* Validate a detector over the data config's "valid" image list.
 * Images are prefetched by nthreads loader threads while the network runs;
 * results are written in coco (JSON), imagenet, or VOC per-class format
 * depending on the data config's "eval" key. */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
#if defined __linux__ || defined __APPLE__
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
#else
        _snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
#endif
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
#if defined __linux__ || defined __APPLE__
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
#else
        _snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
#endif
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        // VOC: one result file per class
        if(!outfile) outfile = "comp4_det_test_";
        fps = (FILE**)calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
#if defined __linux__ || defined __APPLE__
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
#else
            _snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
#endif
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    int nthreads = 4;
    image *val = (image*)calloc(nthreads, sizeof(image));
    image *val_resized = (image*)calloc(nthreads, sizeof(image));
    image *buf = (image*)calloc(nthreads, sizeof(image));
    image *buf_resized = (image*)calloc(nthreads, sizeof(image));
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
    pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t));
#endif

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    // prefetch the first nthreads images
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
        thr[t] = load_data_in_thread(args);
#endif
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // collect the batch loaded in the previous round
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
            pthread_join(thr[t], 0);
#endif
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // start loading the next batch
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
            thr[t] = load_data_in_thread(args);
#endif
        }
        // run detection on the collected batch
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // replace the trailing ",\n" with the closing bracket
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
/* Measure proposal count, recall and mean best-IOU on data/coco_val_5k.list.
 * For each image the network is run once; every ground-truth box is matched
 * against the returned detections and counted correct when its best IOU
 * exceeds iou_thresh.
 *
 * Fixes vs. the previous version: the inner match loop now iterates over the
 * nboxes detections actually returned by get_network_boxes() instead of
 * l.w*l.h*l.n (which could read past the end of dets), and the per-image
 * truth boxes and detections are freed (both were leaked). */
void validate_detector_recall(char *cfgfile, char *weightfile)
{
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths("data/coco_val_5k.list");
    char **paths = (char **)list_to_array(plist);

    int j, k;
    int m = plist->size;
    int i=0;

    float thresh = .001;
    float iou_thresh = .5;
    float nms = .4;

    int total = 0;      // ground-truth boxes seen
    int correct = 0;    // boxes recalled above iou_thresh
    int proposals = 0;  // detections above thresh
    float avg_iou = 0;

    for(i = 0; i < m; ++i){
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
        if (nms) do_nms_obj(dets, nboxes, 1, nms);

        // derive the label file path from the image path
        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);

        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for(k = 0; k < nboxes; ++k){
            if(dets[k].objectness > thresh){
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
            float best_iou = 0;
            // fix: iterate over the nboxes detections actually returned,
            // not l.w*l.h*l.n (possible out-of-bounds read)
            for(k = 0; k < nboxes; ++k){
                float iou = box_iou(dets[k].bbox, t);
                if(dets[k].objectness > thresh && iou > best_iou){
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if(best_iou > iou_thresh){
                ++correct;
            }
        }

        fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
        // fix: these were leaked once per image
        free(truth);
        free_detections(dets, nboxes);
        free(id);
        free_image(orig);
        free_image(sized);
    }
}
/* Run detection on single images, interactively or for one file.
 * If filename is given, process it once and return; otherwise prompt for
 * image paths on stdin in a loop. Results are drawn onto the image and
 * saved to outfile (or "predictions"), and shown on screen when built
 * with OpenCV. */
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);

    image **alphabet = load_alphabet();
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            strncpy(input, filename, 256);
        } else {
            // interactive mode: read a path from stdin, strip the newline
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");
        }
        image im = load_image_color(input,0,0);
        // letterbox to the network size, preserving aspect ratio
        image sized = letterbox_image(im, net->w, net->h);
        //image sized = resize_image(im, net->w, net->h);
        //image sized2 = resize_max(im, net->w);
        //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
        //resize_network(net, sized.w, sized.h);
        layer l = net->layers[net->n-1];

        float *X = sized.data;
        time=what_time_is_it_now();
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        //printf("%d\n", nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
        free_detections(dets, nboxes);
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            cvNamedWindow("predictions", CV_WINDOW_NORMAL);
            if(fullscreen){
                cvSetWindowProperty("predictions", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
            }
            show_image(im, "predictions");
            cvWaitKey(0);
            cvDestroyAllWindows();
#endif
        }

        free_image(im);
        free_image(sized);
        // single-file mode: stop after one image
        if (filename) break;
    }
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
/* CLI dispatcher for the detector subcommands:
 * argv[2] selects test/train/valid/valid2/recall/demo; argv[3..] supply
 * the data config, network config, optional weights and optional input.
 * Optional flags (-thresh, -gpus, -out, ...) are consumed before dispatch. */
void run_detector(int argc, char **argv)
{
    // Set locales
    // force '.' as the decimal separator so config parsing is locale-independent
    setlocale(LC_NUMERIC, "C");

    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        // parse a comma-separated device list, e.g. "-gpus 0,1,2"
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = (int*)calloc(ngpus, sizeof(int));
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            gpu_list = strchr(gpu_list, ',')+1;
        }
    } else {
        // default: the single globally selected device
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }

    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);

    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 6) ? argv[6]: 0;
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
    //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
|
local_temperature_average_response_function.h | // KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#ifndef KRATOS_LOCAL_TEMPERATURE_AVERAGE_RESPONSE_FUNCTION_H_INCLUDED
#define KRATOS_LOCAL_TEMPERATURE_AVERAGE_RESPONSE_FUNCTION_H_INCLUDED
#include "includes/kratos_flags.h"
#include "includes/model_part.h"
#include "utilities/variable_utils.h"
#include "response_functions/adjoint_response_function.h"
namespace Kratos {
///@addtogroup ConvectionDiffusionApplication
///@{
///@name Kratos Classes
///@{
/// Adjoint response function: the average nodal TEMPERATURE over a target
/// (sub)model part.
///
/// On construction, the target part's nodes are flagged STRUCTURE and the
/// number of elements sharing each node is stored in
/// NUMBER_OF_NEIGHBOUR_ELEMENTS, so each node's gradient contribution can
/// be scaled by 1/(neighbour_elements * total_nodes).
class LocalTemperatureAverageResponseFunction: public AdjointResponseFunction
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION(LocalTemperatureAverageResponseFunction);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor: flags the target nodes and counts their neighbour elements.
    LocalTemperatureAverageResponseFunction(Parameters Settings, ModelPart& rModelPart)
    {
        KRATOS_TRY;

        mTargetModelPartName = Settings["model_part_name"].GetString();
        auto& r_target_model_part = GetTargetModelPart(rModelPart, mTargetModelPartName);
        auto& r_nodes = r_target_model_part.Nodes();
        mNumNodes = r_nodes.size();

        VariableUtils variable_utils;
        variable_utils.SetFlag(STRUCTURE,true,r_nodes);

        // Note: this should not be parallel, the operation is not threadsafe if the variable is uninitialized
        for (auto& r_node : r_nodes)
        {
            r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS,0);
        }

        // Global node count across MPI ranks.
        // NOTE(review): r_nodes may include ghost nodes, which would then be
        // counted on more than one rank -- confirm the intended semantics.
        mNumNodes = rModelPart.GetCommunicator().GetDataCommunicator().SumAll(mNumNodes);

        // Count, for every flagged node, the number of elements containing it.
        auto& r_elements = rModelPart.Elements();
        const int num_elements = r_elements.size();
        #pragma omp parallel for
        for (int i = 0; i < num_elements; i++)
        {
            auto i_elem = r_elements.begin() + i;
            auto& r_geom = i_elem->GetGeometry();
            // Fix: inner index renamed to 'j'; it used to shadow the outer
            // parallel loop variable 'i'.
            for (unsigned int j = 0; j < r_geom.PointsNumber(); j++)
            {
                auto& r_node = r_geom[j];
                if (r_node.Is(STRUCTURE))
                {
                    // Locked increment: multiple elements share the node.
                    r_node.SetLock();
                    r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS) += 1;
                    r_node.UnSetLock();
                }
            }
        }
        rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS);

        KRATOS_CATCH("");
    }

    /// Destructor.
    ~LocalTemperatureAverageResponseFunction() override
    {
    }

    ///@}
    ///@name Operations
    ///@{

    void Initialize() override
    {
        KRATOS_TRY;
        KRATOS_CATCH("");
    }

    /// Element gradient: nodal point-temperature sensitivity contribution.
    void CalculateGradient(const Element& rAdjointElement,
                           const Matrix& rResidualGradient,
                           Vector& rResponseGradient,
                           const ProcessInfo& rProcessInfo) override
    {
        ComputePointTemperatureSensitivityContribution(rResidualGradient, rAdjointElement.GetGeometry().Points(),rResponseGradient);
    }

    /// Conditions do not contribute to this response: zero gradient.
    void CalculateGradient(const Condition& rAdjointCondition,
                           const Matrix& rResidualGradient,
                           Vector& rResponseGradient,
                           const ProcessInfo& rProcessInfo) override
    {
        noalias(rResponseGradient) = ZeroVector(rResidualGradient.size1());
    }

    void CalculateFirstDerivativesGradient(const Element& rAdjointElement,
                                           const Matrix& rResidualGradient,
                                           Vector& rResponseGradient,
                                           const ProcessInfo& rProcessInfo) override
    {
        ComputePointTemperatureSensitivityContribution(rResidualGradient, rAdjointElement.GetGeometry().Points(),rResponseGradient);
    }

    void CalculateSecondDerivativesGradient(const Element& rAdjointElement,
                                            const Matrix& rResidualGradient,
                                            Vector& rResponseGradient,
                                            const ProcessInfo& rProcessInfo) override
    {
        ComputePointTemperatureSensitivityContribution(rResidualGradient, rAdjointElement.GetGeometry().Points(),rResponseGradient);
    }

    /// Shape (partial) sensitivities are zero for this response.
    void CalculatePartialSensitivity(Element& rAdjointElement,
                                     const Variable<array_1d<double, 3>>& rVariable,
                                     const Matrix& rSensitivityMatrix,
                                     Vector& rSensitivityGradient,
                                     const ProcessInfo& rProcessInfo) override
    {
        if (rSensitivityGradient.size() != rSensitivityMatrix.size1())
            rSensitivityGradient.resize(rSensitivityMatrix.size1(), false);
        noalias(rSensitivityGradient) = ZeroVector(rSensitivityMatrix.size1());
    }

    void CalculatePartialSensitivity(Condition& rAdjointElement,
                                     const Variable<array_1d<double, 3>>& rVariable,
                                     const Matrix& rSensitivityMatrix,
                                     Vector& rSensitivityGradient,
                                     const ProcessInfo& rProcessInfo) override
    {
        if (rSensitivityGradient.size() != rSensitivityMatrix.size1())
            rSensitivityGradient.resize(rSensitivityMatrix.size1(), false);
        noalias(rSensitivityGradient) = ZeroVector(rSensitivityMatrix.size1());
    }

    /// Response value: MPI-global average of nodal TEMPERATURE over the target part.
    double CalculateValue(ModelPart& rModelPart) override
    {
        KRATOS_TRY;
        const ModelPart& r_target_model_part =
            GetTargetModelPart(rModelPart, mTargetModelPartName);
        const double domain_aggregated_temperature =
            VariableUtils().SumHistoricalVariable<double>(TEMPERATURE, r_target_model_part);
        const Communicator& r_communicator = r_target_model_part.GetCommunicator();
        const int number_of_nodes = r_communicator.LocalMesh().NumberOfNodes();
        const int total_nodes = r_communicator.GetDataCommunicator().SumAll(number_of_nodes);
        // NOTE(review): total_nodes counts local-mesh nodes, while mNumNodes
        // (used in the gradients) is taken from Nodes() -- confirm the two
        // normalizations are meant to coincide.
        return domain_aggregated_temperature / static_cast<double>(total_nodes);
        KRATOS_CATCH("");
    }

    ///@}

private:
    ///@name Member Variables
    ///@{

    int mNumNodes = 0;                 // global node count of the target part
    std::string mTargetModelPartName;  // from Settings["model_part_name"]

    ///@}
    ///@name Private Operations
    ///@{

    /// Fill rLocalSensitivityContribution with
    /// 1/(NUMBER_OF_NEIGHBOUR_ELEMENTS * mNumNodes) for STRUCTURE-flagged
    /// nodes and 0 for all other entries.
    void ComputePointTemperatureSensitivityContribution(
        const Matrix& rDerivativesOfResidual,
        const Element::NodesArrayType& rNodes,
        Vector& rLocalSensitivityContribution) const
    {
        if (rLocalSensitivityContribution.size() != rDerivativesOfResidual.size1())
            rLocalSensitivityContribution.resize(rDerivativesOfResidual.size1(), false);
        noalias(rLocalSensitivityContribution) = ZeroVector(rLocalSensitivityContribution.size());

        const unsigned int num_nodes = rNodes.size();
        for (unsigned int i = 0; i < num_nodes; i++)
        {
            if (rNodes[i].Is(STRUCTURE))
            {
                double factor = 1.0 / (rNodes[i].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS)*mNumNodes);
                rLocalSensitivityContribution[i] = factor;
            }
        }
    }

    /// Resolve the target model part: rModelPart itself or one of its sub
    /// model parts; raises a Kratos error for unknown names.
    ModelPart& GetTargetModelPart(ModelPart& rModelPart, const std::string& rTargetModelPartName)
    {
        KRATOS_TRY;
        if (rModelPart.Name() == rTargetModelPartName)
        {
            return rModelPart;
        }
        else if (rModelPart.HasSubModelPart(rTargetModelPartName))
        {
            return rModelPart.GetSubModelPart(rTargetModelPartName);
        }
        else
        {
            KRATOS_ERROR << "Unknown ModelPart " << rTargetModelPartName << "." << std::endl;
        }
        KRATOS_CATCH("")
        return rModelPart; // unreachable; KRATOS_ERROR throws
    }

    ///@}
};
///@} // Kratos Classes
///@} // ConvectionDiffusionApplication group
}
#endif // KRATOS_LOCAL_TEMPERATURE_AVERAGE_RESPONSE_FUNCTION_H_INCLUDED
|
GB_unaryop__abs_fp32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp32_bool
// op(A') function: GB_tran__abs_fp32_bool
// C type: float
// A type: bool
// cast: float cij = (float) aij
// unaryop: cij = fabsf (aij)
#define GB_ATYPE \
bool
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabsf (x) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = fabsf ((float) Ax [p]) over all anz entries, using a
// statically scheduled OpenMP loop with nthreads threads.
GrB_Info GB_unop__abs_fp32_bool
(
float *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out (see GB_DISABLE above); caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// cast-then-apply, via the GB_* macros defined earlier in this file
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose-and-apply: C = op (cast (A')), with the loop body supplied by
// the shared template GB_unaryop_transpose.c (phase 2).
GrB_Info GB_tran__abs_fp32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
polybench.c | /**
* polybench.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
//#include <polybench.h> //I added this line.
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
/* Wall-clock time in seconds (gettimeofday resolution).  When
   POLYBENCH_TIME is not defined, timing is disabled and 0 is returned
   unconditionally. */
static double rtclock()
{
#ifdef POLYBENCH_TIME
  struct timeval tv;
  const int rc = gettimeofday (&tv, NULL);
  if (rc != 0)
    printf ("Error return from gettimeofday: %d", rc);
  return (double) tv.tv_sec + (double) tv.tv_usec * 1.0e-6;
#else
  return 0;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
/* Read the x86 time-stamp counter with the RDTSC instruction; used for
   cycle-accurate timing (POLYBENCH_CYCLE_ACCURATE_TIMER).  x86/x86_64 only. */
static
unsigned long long int rdtsc()
{
unsigned long long int ret = 0;
unsigned int cycles_lo;
unsigned int cycles_hi;
/* RDTSC leaves the counter in EDX:EAX; combine the halves below. */
__asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
return ret;
}
#endif
/* Evict the LLC before timing by streaming over a zeroed array larger than
   the cache (POLYBENCH_CACHE_SIZE_KB).  The accumulated sum is asserted so
   the compiler cannot elide the reads. */
void polybench_flush_cache()
{
  int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
  double* flush = (double*) calloc (cs, sizeof(double));
  int i;
  double tmp = 0.0;
  /* calloc can fail for a multi-MB request; skip the flush rather than
     dereference NULL. */
  if (flush == NULL)
    return;
#ifdef _OPENMP
  /* Fix: tmp is written by every thread, so it must be a reduction; the
     previous shared `tmp +=` was a data race. */
#pragma omp parallel for reduction(+:tmp)
#endif
  for (i = 0; i < cs; i++)
    tmp += flush[i];
  assert (tmp <= 10.0);
  free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
/* Switch the process to the real-time FIFO scheduler at maximum priority
   to reduce OS interference during measurement. */
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
as root, and this works only for Linux kernels. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
sched_setscheduler (0, SCHED_FIFO, &schedParam);
/* NOTE(review): the sched_setscheduler return value is ignored, so a
   non-root run silently keeps the default scheduler. */
}
/* Restore the default (SCHED_OTHER) scheduling policy after measurement. */
void polybench_linux_standard_scheduler()
{
/* Restore to standard scheduler policy. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
/* Report a failing PAPI call (source location, call site, return code) to
   stdout, shut PAPI down if it was initialized, and terminate the program. */
static
void test_fail(char *file, int line, char *call, int retval)
{
char buf[128];
memset(buf, '\0', sizeof(buf));
if (retval != 0)
fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
else
{
fprintf (stdout,"%-40s SKIPPED\n", file);
fprintf (stdout,"Line # %d\n", line);
}
/* Decode the PAPI return code into a human-readable diagnostic. */
if (retval == PAPI_ESYS)
{
sprintf (buf, "System error in %s", call);
perror (buf);
}
else if (retval > 0)
fprintf (stdout,"Error: %s\n", call);
else if (retval == 0)
fprintf (stdout,"Error: %s\n", call);
else
{
char errstring[PAPI_MAX_STR_LEN];
PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
fprintf (stdout,"Error in %s: %s\n", call, errstring);
}
fprintf (stdout,"\n");
if (PAPI_is_initialized ())
PAPI_shutdown ();
exit (1);
}
/* Initialize the PAPI library and build the event set from
   papi_counters.list.  Under OpenMP, only the designated monitor thread
   (polybench_papi_counters_threadid) performs the initialization. */
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
/* Clamp the monitor thread id to the number of available threads. */
if (omp_get_max_threads () < polybench_papi_counters_threadid)
polybench_papi_counters_threadid = omp_get_max_threads () - 1;
}
#pragma omp barrier
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
polybench_papi_eventset = PAPI_NULL;
if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
/* Translate each configured event name to its PAPI event code. */
int k;
for (k = 0; _polybench_papi_eventlist[k]; ++k)
{
if ((retval =
PAPI_event_name_to_code (_polybench_papi_eventlist[k],
&(polybench_papi_eventlist[k])))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
}
polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
/* Destroy the PAPI event set and shut the library down; executed by the
   monitor thread only when OpenMP is enabled. */
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
if (PAPI_is_initialized ())
PAPI_shutdown ();
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
/* Flush the cache, add the evid-th configured event to the event set, and
   start counting.  Monitor thread only under OpenMP.  Always returns 0. */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval = 1;
char descr[PAPI_MAX_STR_LEN];
PAPI_event_info_t evinfo;
PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
if (PAPI_add_event (polybench_papi_eventset,
polybench_papi_eventlist[evid]) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
return 0;
}
/* Read and stop the running counter, store its value in
   polybench_papi_values[evid], and remove the event from the event set. */
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
long_long values[1];
values[0] = 0;
if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_read", retval);
if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
polybench_papi_values[evid] = values[0];
if ((retval = PAPI_remove_event
(polybench_papi_eventset,
polybench_papi_eventlist[evid])) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
/* Print the collected counter values (one line), optionally prefixed with
   event names when POLYBENCH_PAPI_VERBOSE is defined. */
void polybench_papi_print()
{
int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num() == polybench_papi_counters_threadid)
{
#ifdef POLYBENCH_PAPI_VERBOSE
verbose = 1;
#endif
if (verbose)
printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
int evid;
for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
{
if (verbose)
printf ("%s=", _polybench_papi_eventlist[evid]);
printf ("%llu ", polybench_papi_values[evid]);
if (verbose)
printf ("\n");
}
printf ("\n");
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
/* Pre-measurement setup: flush the cache (unless POLYBENCH_NO_FLUSH_CACHE)
   and optionally switch to the FIFO scheduler. */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_fifo_scheduler ();
#endif
}
/* Prepare instruments and record the start timestamp: seconds by default,
   raw cycles when POLYBENCH_CYCLE_ACCURATE_TIMER is defined. */
void polybench_timer_start()
{
//printf("In polybench timer start\n");
polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_start = rtclock ();
#else
polybench_c_start = rdtsc ();
#endif
}
/* Record the end timestamp and restore the standard scheduler if the FIFO
   scheduler was enabled at start. */
void polybench_timer_stop()
{
//printf("In polybench timer stop\n");
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_end = rtclock ();
#else
polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_standard_scheduler ();
#endif
}
/* Print the measurement: GFLOP/s with POLYBENCH_GFLOPS (requires the flop
   count to have been set), otherwise elapsed seconds, or raw cycles with
   POLYBENCH_CYCLE_ACCURATE_TIMER. */
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
  /* Fix: the global declared above is polybench_program_total_flops; the
     previous "__"-prefixed name did not exist and broke this build mode. */
  if (polybench_program_total_flops == 0)
    {
      printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
      printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
    }
  else
    printf ("%0.2lf\n",
            (polybench_program_total_flops /
             (double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
  /* Fix: "%Ld" is not a standard conversion; the cycle difference is
     unsigned long long, which is printed with "%llu". */
  printf ("%llu\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
/* Allocate `num` bytes aligned to a 32-byte boundary; on any failure a
   diagnostic is printed and the program exits. */
static void *xmalloc (size_t num)
{
  void *ptr = NULL;
  const int err = posix_memalign (&ptr, 32, num);
  if (err != 0 || ptr == NULL)
    {
      fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
      exit (1);
    }
  return ptr;
}
/* Allocate an aligned buffer for n elements of elt_size bytes (via
   xmalloc, which exits on failure).  Resolves the old FIXME: the product
   n * elt_size is now checked for size_t overflow before allocating. */
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
  /* (size_t)-1 is SIZE_MAX; reject non-positive element sizes and
     products that would wrap around. */
  if (elt_size <= 0 ||
      n > (unsigned long long)((size_t)-1) / (unsigned long long) elt_size)
    {
      fprintf (stderr, "[PolyBench] allocation size overflows size_t");
      exit (1);
    }
  size_t val = n;
  val *= elt_size;
  void* ret = xmalloc (val);
  return ret;
}
|
rii.h | #ifndef RII_H
#define RII_H
// Standard library
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <random>
#include <stdexcept>
#include <utility>
#include <vector>
// Project-local
#include "./pqkmeans.h"
#include "./distance.h"
// For py::array_t
// See http://pybind11.readthedocs.io/en/master/advanced/pycpp/numpy.html#direct-access
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;
namespace rii {
// Flat M x Ks table of floats; equivalent to
// vector<vector<float>>(M, vector<float>(Ks)) but stored contiguously.
struct DistanceTable{
    DistanceTable() {}
    DistanceTable(size_t M, size_t Ks) : Ks_(Ks), data_(M * Ks) {}
    // Store val at row m, column ks.
    void SetVal(size_t m, size_t ks, float val) {
        data_[Ks_ * m + ks] = val;
    }
    // Read the value at row m, column ks.
    float GetVal(size_t m, size_t ks) const {
        return data_[Ks_ * m + ks];
    }
    size_t Ks_;
    std::vector<float> data_;
};
// Reconfigurable inverted index over PQ codes: holds the PQ codebook
// (codewords_), the flattened database codes, the coarse cluster centers,
// and one posting list per center.  Exposed to Python via pybind11.
class RiiCpp {
public:
RiiCpp() {} // Shouldn't be default-constructed
RiiCpp(const py::array_t<float> &codewords, bool verbose);
// ===== Functions that can be called from Python =====
//void SetCodewords(const py::array_t<float> &codewords); // This should be called first
void Reconfigure(int nlist, int iter);
void AddCodes(const py::array_t<unsigned char> &codes, bool update_flag);
// The default integers of Python is int64 (long long), so the type of target_ids is long long
std::pair<std::vector<size_t>, std::vector<float>> QueryLinear(const py::array_t<float> &query,
int topk,
const py::array_t<long long> &target_ids) const;
std::pair<std::vector<size_t>, std::vector<float>> QueryIvf(const py::array_t<float> &query,
int topk,
const py::array_t<long long> &target_ids,
int L) const;
void Clear();
// ===== Functions that would not be called from Python (Used inside c++) =====
void UpdatePostingLists(size_t start, size_t num);
DistanceTable DTable(const py::array_t<float> &vec) const;
float ADist(const DistanceTable &dtable, const std::vector<unsigned char> &code) const;
float ADist(const DistanceTable &dtable, const std::vector<unsigned char> &flattened_codes, size_t n) const;
std::pair<std::vector<size_t>, std::vector<float>> PairVectorToVectorPair(const std::vector<std::pair<size_t, float>> &pair_vec) const;
// Property getter
size_t GetN() const {return flattened_codes_.size() / M_;}
size_t GetNumList() const {return coarse_centers_.size();}
// Given a long (N * M) codes, pick up n-th code
std::vector<unsigned char> NthCode(const std::vector<unsigned char> &long_code, size_t n) const;
// Given a long (N * M) codes, pick up m-th element from n-th code
unsigned char NthCodeMthElement(const std::vector<unsigned char> &long_code, std::size_t n, size_t m) const;
// Member variables
size_t M_, Ks_;
bool verbose_;
std::vector<std::vector<std::vector<float>>> codewords_; // (M, Ks, Ds)
std::vector<std::vector<unsigned char>> coarse_centers_; // (NumList, M)
std::vector<unsigned char> flattened_codes_; // (N, M) PQ codes are flattened to N * M long array
std::vector<std::vector<int>> posting_lists_; // (NumList, any)
};
// Construct from a (M, Ks, Ds) PQ codebook given as a NumPy array: copy the
// codewords into codewords_ and record M_ / Ks_.
RiiCpp::RiiCpp(const py::array_t<float> &codewords, bool verbose)
{
verbose_ = verbose;
const auto &r = codewords.unchecked<3>(); // codewords must have ndim=3, with non-writable
M_ = (size_t) r.shape(0);
Ks_ = (size_t) r.shape(1);
size_t Ds = (size_t) r.shape(2);
codewords_.resize(M_, std::vector<std::vector<float>>(Ks_, std::vector<float>(Ds)));
for (ssize_t m = 0; m < r.shape(0); ++m) {
for (ssize_t ks = 0; ks < r.shape(1); ++ks) {
for (ssize_t ds = 0; ds < r.shape(2); ++ds) {
codewords_[m][ks][ds] = r(m, ks, ds);
}
}
}
if (verbose_) {
// Check which SIMD functions are used. See distance.h for this global variable.
std::cout << "SIMD support: " << g_simd_architecture << std::endl;
}
}
// (Re)build the coarse quantizer: run PQk-means on a random sample of the
// stored codes (at most nlist * 100), adopt the resulting nlist centers,
// and reassign every stored code to a posting list.
void RiiCpp::Reconfigure(int nlist, int iter)
{
assert(0 < nlist);
assert((size_t) nlist <= GetN());
// ===== (1) Sampling vectors for pqk-means =====
// Since clustering takes time, we use a subset of all codes for clustering.
size_t len_for_clustering = std::min(GetN(), (size_t) nlist * 100);
if (verbose_) {
std::cout << "The number of vectors used for training of coarse centers: " << len_for_clustering << std::endl;
}
// Prepare a random set of integers, drawn from [0, ..., N-1], where the cardinality of the set is len_for_clustering
std::vector<size_t> ids_for_clustering(GetN()); // This can be large and might be the bootle neck of memory consumption
std::iota(ids_for_clustering.begin(), ids_for_clustering.end(), 0); // 0, 1, 2, ...
std::shuffle(ids_for_clustering.begin(), ids_for_clustering.end(), std::default_random_engine(123));
ids_for_clustering.resize(len_for_clustering);
ids_for_clustering.shrink_to_fit(); // For efficient memory usage
std::vector<unsigned char> flattened_codes_randomly_picked; // size=len_for_clustering
flattened_codes_randomly_picked.reserve(len_for_clustering * M_);
for (const auto &id : ids_for_clustering) { // Pick up vectors to construct a training set
std::vector<unsigned char> code = NthCode(flattened_codes_, id);
flattened_codes_randomly_picked.insert(flattened_codes_randomly_picked.end(),
code.begin(), code.end());
}
assert(flattened_codes_randomly_picked.size() == len_for_clustering * M_);
// ===== (2) Run pqk-means =====
if (verbose_) {std::cout << "Start to run PQk-means" << std::endl;}
pqkmeans::PQKMeans clustering_instance(codewords_, nlist, iter, verbose_);
clustering_instance.fit(flattened_codes_randomly_picked);
// ===== (3) Update coarse centers =====
coarse_centers_ = clustering_instance.GetClusterCenters();
assert(coarse_centers_.size() == (size_t) nlist);
assert(coarse_centers_[0].size() == M_);
// ===== (4) Update posting lists =====
if (verbose_) {std::cout << "Start to update posting lists" << std::endl;}
posting_lists_.clear();
posting_lists_.resize(nlist);
for (auto &posting_list : posting_lists_) {
posting_list.reserve(GetN() / nlist); // Roughly malloc
}
UpdatePostingLists(0, GetN());
}
// Append PQ codes (shape (N, M)) to flattened_codes_ and, when update_flag
// is true, assign the new codes to posting lists.  update_flag should be
// false only for the very first addition (inside add_configure()) or when
// the caller will run reconfigure() manually afterwards.
void RiiCpp::AddCodes(const py::array_t<unsigned char> &codes, bool update_flag)
{
    if (update_flag && coarse_centers_.empty()) {
        std::cerr << "Error. reconfigure() must be called before running add(vecs=X, update_posting_lists=True)."
                  << "If this is the first addition, please call add_configure(vecs=X)" << std::endl;
        // Fix: a bare `throw;` outside a catch handler has no active
        // exception to rethrow and calls std::terminate.  Raise a
        // catchable exception instead (pybind11 maps it to RuntimeError).
        throw std::runtime_error(
            "reconfigure() must be called before add() with update_posting_lists=True");
    }

    // ===== (1) Add codes to flattened_codes =====
    const auto &r = codes.unchecked<2>(); // codes must have ndim=2; with non-writeable
    size_t N = (size_t) r.shape(0);
    assert(M_ == (size_t) r.shape(1));
    size_t N0 = GetN();
    flattened_codes_.resize( (N0 + N) * M_);
    for (size_t n = 0; n < N; ++n) {
        for (size_t m = 0; m < M_; ++m) {
            flattened_codes_[ (N0 + n) * M_ + m] = r(n, m);
        }
    }
    if (verbose_) {
        std::cout << N << " new vectors are added." << std::endl;
        std::cout << "Total number of codes is " << GetN() << std::endl;
    }

    // ===== (2) Update posting lists =====
    if (update_flag) {
        if (verbose_) { std::cout << "Start to update posting lists" << std::endl; }
        UpdatePostingLists(N0, N);
    }
}
// Exhaustive ADC search: build the distance table for `query`, score every
// stored code (or only `target_ids` when non-empty), and return the topk
// ids and distances sorted ascending.
std::pair<std::vector<size_t>, std::vector<float> > RiiCpp::QueryLinear(const py::array_t<float> &query,
int topk,
const py::array_t<long long> &target_ids) const
{
const auto &tids = target_ids.unchecked<1>(); // target_ids must have ndim = 1; can be non-writeable
size_t S = tids.shape(0); // The number of target_ids. It might be 0 if not specified.
assert((size_t) topk <= GetN());
// ===== (1) Create dtable =====
DistanceTable dtable = DTable(query);
// ===== (2) Run PQ linear search =====
// [todo] Can be SIMDized?
// NOTE(review): an unsigned (size_t) loop index under `omp parallel for`
// requires OpenMP >= 3.0 -- confirm the target compilers support it.
std::vector<std::pair<size_t, float>> scores;
if (S == 0) { // No target ids
size_t N = GetN();
scores.resize(N);
#pragma omp parallel for
for (size_t n = 0; n < N; ++n) {
scores[n] = {n, ADist(dtable, flattened_codes_, n)};
}
} else { // Target ids are specified
assert((size_t) topk <= S);
assert(S <= GetN());
scores.resize(S);
#pragma omp parallel for
for (size_t s = 0; s < S; ++s) {
size_t tid = static_cast<size_t>(tids(s));
scores[s] = {tid, ADist(dtable, flattened_codes_, tid)};
}
}
// ===== (3) Sort them =====
// [todo] Can be parallelized?
std::partial_sort(scores.begin(), scores.begin() + topk, scores.end(),
[](const std::pair<size_t, float> &a, const std::pair<size_t, float> &b){return a.second < b.second;});
scores.resize(topk);
scores.shrink_to_fit();
// ===== (4) Return the result, in the form of pair<vec, vec> =====
// Note that this returns two lists, not np.array
return PairVectorToVectorPair(scores);
}
// Inverted-index ADC search: rank the coarse centers by distance to the
// query, scan the w closest posting lists until L candidates are scored,
// then return the topk best.  Note the control flow: `goto finish` jumps
// from the scan loop into the final-reranking branch below it.
std::pair<std::vector<size_t>, std::vector<float> > RiiCpp::QueryIvf(const py::array_t<float> &query,
int topk,
const py::array_t<long long> &target_ids,
int L) const
{
const auto &tids = target_ids.unchecked<1>(); // target_ids must have ndim = 1 with non-writeable
size_t S = tids.shape(0); // The number of target_ids. It might be 0 if not specified.
assert((size_t) topk <= GetN());
assert(topk <= L && (size_t) L <= GetN());
// ===== (1) Create dtable =====
DistanceTable dtable = DTable(query);
// ===== (2) Compare to coarse centers and sort the results =====
std::vector<std::pair<size_t, float>> scores_coarse(coarse_centers_.size());
size_t nlist = GetNumList();
//#pragma omp parallel for
for (size_t no = 0; no < nlist; ++no) {
scores_coarse[no] = {no, ADist(dtable, coarse_centers_[no])};
}
// ===== (3) Partial sort the coarse results. =====
size_t w; // The number of posting lists to be considered
if (S == 0) {
w = (size_t) std::round((double) L * GetNumList() / GetN());
} else {
assert((size_t) topk <= S && S <= GetN());
w = (size_t) std::round((double) L * GetNumList() / S);
}
w += 3; // Top poslists might contain a few items, so we set w litter bit bigger for insurance
if (nlist < w) { // If w is bigger than the original nlist, let's set back nlist
w = nlist;
}
std::partial_sort(scores_coarse.begin(), scores_coarse.begin() + w, scores_coarse.end(),
[](const std::pair<size_t, float> &a, const std::pair<size_t, float> &b){return a.second < b.second;});
// ===== (4) Traverse posting list =====
std::vector<std::pair<size_t, float>> scores;
scores.reserve(L);
int coarse_cnt = 0;
for (const auto &score_coarse : scores_coarse) {
size_t no = score_coarse.first;
coarse_cnt++;
// [todo] This loop can be parallelized
for (const auto &n : posting_lists_[no]) {
// ===== (5) If id is not included in target_ids, skip. =====
// Note that if S==0 (target is all), then evaluate all IDs
if (S != 0 && !std::binary_search(target_ids.data(), target_ids.data() + S, static_cast<long long>(n))) {
continue;
}
// ===== (6) Evaluate n =====
scores.emplace_back(n, ADist(dtable, flattened_codes_, n));
// ===== (7) If scores are collected enough =====
if (scores.size() == (size_t) L) {
goto finish;
}
}
// If w coarse centers are traversed and still L items are not found while more than topk items are found,
// we terminate the process and do the final reranking
if ( (size_t) coarse_cnt == w && scores.size() >= (unsigned long) topk) {
finish:
// ===== (8) Sort them =====
std::partial_sort(scores.begin(), scores.begin() + topk, scores.end(),
[](const std::pair<size_t, float> &a, const std::pair<size_t, float> &b){return a.second < b.second;});
scores.resize(topk);
scores.shrink_to_fit();
// ===== (9) Return the result, in the form of pair<vec, vec> =====
// Note that this returns two lists, not np.array
return PairVectorToVectorPair(scores);
}
}
// It can be happened that vectors are not found
return std::pair<std::vector<size_t>, std::vector<float>>({}, {});
}
// Drop all index state (centers, stored codes, posting lists); the PQ
// codebook in codewords_ is kept.
void RiiCpp::Clear()
{
    posting_lists_.clear();
    flattened_codes_.clear();
    coarse_centers_.clear();
}
// Assign codes [start, start + num) to their nearest coarse center and
// append their ids to the corresponding posting lists.
void RiiCpp::UpdatePostingLists(size_t start, size_t num)
{
// Update (add) identifiers to posting lists, from codes[start] to codes[start + num -1]
// This just add IDs, so be careful to call this (e.g., the same IDs will be added if you call
// this funcs twice at the same time, that would be not expected behavior)
assert(start <= GetN());
assert(start + num <= GetN());
// ===== (1) Construct a dummy pqkmeans class for computing Symmetric Distance =====
pqkmeans::PQKMeans clustering_instance(codewords_, GetNumList(), 0, true);
clustering_instance.SetClusterCenters(coarse_centers_);
// ===== (2) Update posting lists =====
std::vector<size_t> assign(num);
#pragma omp parallel for
for (size_t n = 0; n < num; ++n) {
assign[n] = clustering_instance.predict_one(NthCode(flattened_codes_, start + n));
}
// Serial append: posting_lists_ push_back is not thread-safe.
for (size_t n = 0; n < num; ++n) {
posting_lists_[assign[n]].push_back(start + n);
}
}
// Build the M x Ks asymmetric-distance table for a query vector: squared L2
// distance from each query subvector to every codeword (via fvec_L2sqr).
DistanceTable RiiCpp::DTable(const py::array_t<float> &vec) const
{
const auto &v = vec.unchecked<1>();
size_t Ds = codewords_[0][0].size();
assert((size_t) v.shape(0) == M_ * Ds);
DistanceTable dtable(M_, Ks_);
for (size_t m = 0; m < M_; ++m) {
for (size_t ks = 0; ks < Ks_; ++ks) {
dtable.SetVal(m, ks, fvec_L2sqr(&(v(m * Ds)), codewords_[m][ks].data(), Ds));
}
}
return dtable;
}
// Asymmetric distance of one M_-byte PQ code: sum of the per-subspace
// table entries selected by the code.
float RiiCpp::ADist(const DistanceTable &dtable, const std::vector<unsigned char> &code) const
{
    assert(code.size() == M_);
    float total = 0.0f;
    size_t m = 0;
    for (const unsigned char ks : code) {
        total += dtable.GetVal(m++, ks);
    }
    return total;
}
// Asymmetric distance of the n-th code stored in the flattened code array,
// read element-wise without materializing the code.
float RiiCpp::ADist(const DistanceTable &dtable, const std::vector<unsigned char> &flattened_codes, size_t n) const
{
    float total = 0.0f;
    for (size_t m = 0; m < M_; ++m) {
        total += dtable.GetVal(m, NthCodeMthElement(flattened_codes, n, m));
    }
    return total;
}
// Split a vector of (id, distance) pairs into two parallel vectors
// (ids, distances), preserving order.
std::pair<std::vector<size_t>, std::vector<float> > RiiCpp::PairVectorToVectorPair(const std::vector<std::pair<size_t, float> > &pair_vec) const
{
    std::vector<size_t> ids;
    std::vector<float> dists;
    ids.reserve(pair_vec.size());
    dists.reserve(pair_vec.size());
    for (const auto &p : pair_vec) {
        ids.push_back(p.first);
        dists.push_back(p.second);
    }
    return {std::move(ids), std::move(dists)};
}
// Slice out the n-th M_-byte PQ code from the flattened code array.
std::vector<unsigned char> RiiCpp::NthCode(const std::vector<unsigned char> &long_code, size_t n) const
{
    const auto first = long_code.begin() + static_cast<std::ptrdiff_t>(n * M_);
    const auto last = first + static_cast<std::ptrdiff_t>(M_);
    return std::vector<unsigned char>(first, last);
}
unsigned char RiiCpp::NthCodeMthElement(const std::vector<unsigned char> &long_code, std::size_t n, size_t m) const
{
    // Row-major flat layout: code n occupies bytes [n*M_, (n+1)*M_).
    const std::size_t offset = n * M_ + m;
    return long_code[offset];
}
} // namespace rii
#endif // RII_H
|
GB_unaryop__ainv_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_uint64
// op(A') function: GB_tran__ainv_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the AINV unary operator (cij = -aij) to every
// entry, casting uint64_t -> uint32_t first (unsigned negation wraps mod 2^32).
GrB_Info GB_unop__ainv_uint32_uint64
(
    uint32_t *restrict Cx,        // output values, uint32_t
    const uint64_t *restrict Ax,  // input values, uint64_t
    int64_t anz,                  // number of entries in Ax and Cx
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // static schedule: each iteration does identical work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // Cx [p] = -((uint32_t) Ax [p])
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// All of the work is done by the included template, driven by the GB_* macros
// defined above for this type/operator combination.
GrB_Info GB_tran__ainv_uint32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,     // per-slice row counts (phase 1 result)
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                       // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__isle_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int16)
// A*D function (colscale): GB (_AxD__isle_int16)
// D*A function (rowscale): GB (_DxB__isle_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int16)
// C=scalar+B GB (_bind1st__isle_int16)
// C=scalar+B' GB (_bind1st_tran__isle_int16)
// C=A+scalar GB (_bind2nd__isle_int16)
// C=A'+scalar GB (_bind2nd_tran__isle_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT16 || GxB_NO_ISLE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the loop lives in the included
// template, specialized here via the GB_* macros for the ISLE int16 operator.
void GB (_Cdense_ewise3_noaccum__isle_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C using the
// ISLE int16 operator as the accumulator.
GrB_Info GB (_Cdense_accumB__isle_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
GrB_Info GB (_Cdense_accumb__isle_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,   // pointer to the scalar, cast to int16_t below
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns; harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__isle_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;   // Cx is written by the template
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__isle_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;   // Cx is written by the template
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with the ISLE int16
// operator applied where A and B intersect.
GrB_Info GB (_AaddB__isle_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion: alpha/beta stand in for missing entries of A/B
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper.
GrB_Info GB (_AemultB_08__isle_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__isle_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for ISLE, so only this branch is compiled.)
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are bitmap/full.
GrB_Info GB (_AemultB_04__isle_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__isle_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound to the
// first argument, over all bnz entries of B.
GrB_Info GB (_bind1st__isle_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // the bound scalar x
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL when B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;   // skip positions absent from the bitmap
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;          // ISLE: 0/1 result stored as int16
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar bound to the
// second argument, over all anz entries of A.
GrB_Info GB (_bind2nd__isle_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,     // the bound scalar y
    const int8_t *restrict Ab,  // bitmap of A (NULL when A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;   // skip positions absent from the bitmap
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;          // ISLE: 0/1 result stored as int16
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// to the first argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__isle_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,     // the bound scalar x
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (generator artifact; same definition either way)
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound
// to the second argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__isle_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // the bound scalar y
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
fields_values.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#define XSTR(x) #x
#define STR(x) XSTR(x)
#define streqls(s1, s2) (!strcmp(s1, s2))
#define check(condition) \
if (!(condition)) { \
fprintf(stderr, "error: %s: %d: " STR(condition) "\n", __FILE__, \
__LINE__); \
exit(1); \
}
#if defined(_WIN32)
#include <windows.h>
#define getpid _getpid
typedef int pid_t;
#define gettid GetCurrentThreadId
#define my_gethostname(buf, sz) GetComputerNameA(buf, &(sz))
#else
#include <unistd.h>
#include <sys/types.h>
#define my_gethostname(buf, sz) gethostname(buf, sz)
#endif
#define BUFFER_SIZE 256
// Capture the current thread's affinity string (rendered with the format set
// via omp_set_affinity_format) and parse it as a single decimal integer.
// Locals feed the check() macro's stringified failure message, so names are
// left unchanged.
int get_integer() {
  int n, retval;
  char buf[BUFFER_SIZE];
  size_t needed = omp_capture_affinity(buf, BUFFER_SIZE, NULL);
  check(needed < BUFFER_SIZE);   // captured string must fit the buffer
  n = sscanf(buf, "%d", &retval);
  check(n == 1);                 // exactly one integer expected
  return retval;
}
// Capture the current thread's affinity string and return a heap-allocated
// copy (caller must free).  Fix: removed the unused locals `int n, retval;`
// that were carried over from get_integer() (dead code / -Wunused warning).
char* get_string() {
  char buf[BUFFER_SIZE];
  size_t needed = omp_capture_affinity(buf, BUFFER_SIZE, NULL);
  check(needed < BUFFER_SIZE);
  return strdup(buf);
}
// For each of the two equivalent format strings, install it as the affinity
// format and verify -- in an 8-thread region, inside a nested 3-thread
// region, and again after the nested region ends -- that the captured field
// equals the live value returned by func().
void check_integer(const char* formats[2], int(*func)()) {
  int i;
  for (i = 0; i < 2; ++i) {
    omp_set_affinity_format(formats[i]);
    #pragma omp parallel num_threads(8)
    {
      check(get_integer() == func());
      #pragma omp parallel num_threads(3)
      {
        check(get_integer() == func());
      }
      check(get_integer() == func());
    }
  }
}
// Verify that %{nesting_level} and its short form %L both expand to the
// value of omp_get_level().
void check_nesting_level() {
  const char* fmts[2] = {"%{nesting_level}", "%L"};
  check_integer(fmts, omp_get_level);
}
// Verify that %{thread_num} and its short form %n both expand to the
// value of omp_get_thread_num().
void check_thread_num() {
  const char* fmts[2] = {"%{thread_num}", "%n"};
  check_integer(fmts, omp_get_thread_num);
}
// Verify that %{num_threads} and its short form %N both expand to the
// value of omp_get_num_threads().
void check_num_threads() {
  const char* fmts[2] = {"%{num_threads}", "%N"};
  check_integer(fmts, omp_get_num_threads);
}
// Thread number of this thread's ancestor one nesting level up.
int ancestor_helper() {
  int parent_level = omp_get_level() - 1;
  return omp_get_ancestor_thread_num(parent_level);
}
// Verify that %{ancestor_tnum} and its short form %a both expand to the
// parent thread's number (see ancestor_helper).
void check_ancestor_tnum() {
  const char* fmts[2] = {"%{ancestor_tnum}", "%a"};
  check_integer(fmts, ancestor_helper);
}
// Wrapper giving getpid() the int() signature that check_integer() expects.
int my_get_pid() {
  return (int)getpid();
}
// Verify that %{process_id} and its short form %P both expand to getpid().
void check_process_id() {
  const char* fmts[2] = {"%{process_id}", "%P"};
  check_integer(fmts, my_get_pid);
}
/*
int my_get_tid() { return (int)gettid(); }
void check_native_thread_id() {
// Check %{native_thread_id} and %i
const char* formats[2] = {"%{native_thread_id}", "%i"};
check_integer(formats, my_get_tid);
}
*/
void check_host() {
int i;
int buffer_size = 256;
const char* formats[2] = {"%{host}", "%H"};
char hostname[256];
my_gethostname(hostname, buffer_size);
for (i = 0; i < 2; ++i) {
omp_set_affinity_format(formats[i]);
#pragma omp parallel num_threads(8)
{
char* host = get_string();
check(streqls(host, hostname));
free(host);
}
}
}
// Unknown field names must expand to the literal string "undefined", both in
// long form (%{foobar}) and as an unrecognized short name (%X).
void check_undefined() {
  const char* formats[2] = {"%{foobar}", "%X"};
  for (int i = 0; i < 2; ++i) {
    omp_set_affinity_format(formats[i]);
    #pragma omp parallel num_threads(8)
    {
      char* undef = get_string();
      check(streqls(undef, "undefined"));
      free(undef);
    }
  }
}
// Exercise each supported affinity-format field in turn.
int main(int argc, char** argv) {
  omp_set_nested(1);  // the nested regions in check_integer() must really fork
  check_nesting_level();
  check_thread_num();   // fix: was defined above but never invoked
  check_num_threads();
  check_ancestor_tnum();
  check_process_id();
  //check_native_thread_id();
  check_host();
  check_undefined();
  return 0;
}
|
GB_bitmap_assign_C_whole_template.c | //------------------------------------------------------------------------------
// GB_bitmap_assign_C_whole_template: iterate over a bitmap matrix C
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// The #include'ing file defines a GB_CIJ_WORK macro for the body of the loop,
// which operates on the entry C(iC,jC) at position Cx [pC] and Cb [pC]. The C
// matrix held in bitmap form. If the mask matrix is also a bitmap matrix or
// full matrix, the GB_GET_MIJ macro can compute the effective value of the
// mask for the C(iC,jC) entry.
// C must be bitmap or full. If M is accessed, it must also be bitmap or full.
#ifndef GB_GET_MIJ
#define GB_GET_MIJ(mij,pM) ;   // default: mask not consulted
#endif
{
    // iterate over all of C(:,:).
    int nthreads = GB_nthreads (cnzmax, chunk, nthreads_max) ;
    int tid ;
    // Each task scans one contiguous slab of C's bitmap; entry-count deltas
    // accumulate in task_cnvals and are reduced into cnvals.
    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        int64_t pC_start, pC_end, task_cnvals = 0 ;
        GB_PARTITION (pC_start, pC_end, cnzmax, tid, nthreads) ;
        for (int64_t pC = pC_start ; pC < pC_end ; pC++)
        {
            // int64_t iC = pC % cvlen ;
            // int64_t jC = pC / cvlen ;
            GB_GET_MIJ (mij, pC) ;   // mij = Mask (pC)
            GB_CIJ_WORK (pC) ;       // operate on C(iC,jC)
        }
        cnvals += task_cnvals ;
    }
}
|
pmem-streams.c | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<string.h>
#include<libpmem.h>
#include<omp.h>
#include"utils.h"
#define ARRAY_SIZE 100000000
#define MB 1048576
#define REPEATS 10
void copy(double *, double *, long int);
void scale(double *, double *, double, long int);
void add(double *, double *, double *, long int);
void triadd(double *, double *, double *, double, long int);
void initialise(double *, double *, double *, long int);
/*
 * STREAM-style bandwidth benchmark run twice: once on malloc'd DRAM and once
 * on a libpmem-mapped file.  argv: <array_size> <repeats> <path-prefix>; any
 * other argument count selects the compiled-in defaults.
 *
 * Fixes vs. the original:
 *  - the file name was appended in place to argv[3] (or to the string
 *    literal "" in the default case) with sprintf, writing past the end of
 *    that storage -- undefined behavior.  The path is now built in a local
 *    buffer with bounded snprintf.
 *  - array_size is long int, so it is parsed with atol (atoi truncated it).
 *  - malloc results are checked before use.
 *  - explicit casts when carving the pmem mapping (char *) into double *.
 *  - removed the unused variable `bytes`.
 */
int main(int argc, char *argv[]){
    struct timespec start, end;
    char path[4096];
    char title[100] = "";
    double *a, *b, *c;
    char *pmemaddr = NULL;
    long int array_size;
    int repeats;
    int i;
    int array_element_size;
    int is_pmem;
    size_t mapped_len;
    int num_threads;
    if(argc != 4){
        array_size = ARRAY_SIZE;
        repeats = REPEATS;
        path[0] = '\0';
    }else{
        array_size = atol(argv[1]);
        repeats = atoi(argv[2]);
        snprintf(path, sizeof(path), "%s", argv[3]);
    }
    a = malloc(sizeof(double)*array_size);
    b = malloc(sizeof(double)*array_size);
    c = malloc(sizeof(double)*array_size);
    if(a == NULL || b == NULL || c == NULL){
        fprintf(stderr, "Failed to allocate work arrays\n");
        exit(-1);
    }
    array_element_size = sizeof(a[0]);
    printf("Using an array of %ld doubles (%ld MB) for experiments\n",array_size,array_size*array_element_size/MB);
    /* one throwaway parallel region just to discover the thread count */
    #pragma omp parallel shared(num_threads)
    {
        num_threads = omp_get_num_threads();
    }
    printf("Running on %d threads\n", num_threads);
    printf("Memory test\n");
    initialise(a,b,c,array_size);
    clock_gettime(CLOCK_MONOTONIC, &start);
    for(i=0; i<repeats; i++){
        copy(a,b,array_size);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    strcpy(title, "Copy");
    elapsed_time_bw_hr(start, end, repeats, (2*array_size*array_element_size)/MB, title);
    clock_gettime(CLOCK_MONOTONIC, &start);
    for(i=0; i<repeats; i++){
        scale(a,c,2.4,array_size);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    strcpy(title, "Scale");
    elapsed_time_bw_hr(start, end, repeats, (2*array_size*array_element_size)/MB, title);
    clock_gettime(CLOCK_MONOTONIC, &start);
    for(i=0; i<repeats; i++){
        add(b,a,c,array_size);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    strcpy(title, "Add");
    elapsed_time_bw_hr(start, end, repeats, (3*array_size*array_element_size)/MB, title);
    clock_gettime(CLOCK_MONOTONIC, &start);
    for(i=0; i<repeats; i++){
        triadd(a,b,c,2.4,array_size);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    strcpy(title, "Triadd");
    elapsed_time_bw_hr(start, end, repeats, (3*array_size*array_element_size)/MB, title);
    free(a);
    free(b);
    free(c);
    printf("PMem test\n");
    /* bounded append of the test-file name to the (possibly empty) prefix */
    snprintf(path + strlen(path), sizeof(path) - strlen(path), "pstream_test_file");
    if ((pmemaddr = pmem_map_file(path, array_size*array_element_size*3,
                PMEM_FILE_CREATE|PMEM_FILE_EXCL,
                0666, &mapped_len, &is_pmem)) == NULL) {
        perror("pmem_map_file");
        fprintf(stderr, "Failed to pmem_map_file for filename:%s.\n", path);
        exit(-100);
    }
    printf("Using file %s for pmem\n",path);
    /* carve the mapping into three equal double arrays (explicit casts:
     * pmemaddr is char *, offsets are in bytes) */
    a = (double *)(pmemaddr);
    b = (double *)(pmemaddr + array_size*array_element_size);
    c = (double *)(pmemaddr + array_size*array_element_size*2);
    initialise(a,b,c,array_size);
    clock_gettime(CLOCK_MONOTONIC, &start);
    for(i=0; i<repeats; i++){
        copy(a,b,array_size);
        /* NOTE(review): only the Copy kernel persists per iteration; the
         * others rely on the final pmem_persist below -- confirm intended */
        pmem_persist(pmemaddr, array_size*array_element_size);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    strcpy(title, "Copy");
    elapsed_time_bw_hr(start, end, repeats, (2*array_size*array_element_size)/MB, title);
    clock_gettime(CLOCK_MONOTONIC, &start);
    for(i=0; i<repeats; i++){
        scale(a,c,2.4,array_size);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    strcpy(title, "Scale");
    elapsed_time_bw_hr(start, end, repeats, (2*array_size*array_element_size)/MB, title);
    clock_gettime(CLOCK_MONOTONIC, &start);
    for(i=0; i<repeats; i++){
        add(b,a,c,array_size);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    strcpy(title, "Add");
    elapsed_time_bw_hr(start, end, repeats, (3*array_size*array_element_size)/MB, title);
    clock_gettime(CLOCK_MONOTONIC, &start);
    for(i=0; i<repeats; i++){
        triadd(a,b,c,2.4,array_size);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    strcpy(title, "Triadd");
    elapsed_time_bw_hr(start, end, repeats, (3*array_size*array_element_size)/MB, title);
    pmem_persist(pmemaddr, mapped_len);
    pmem_unmap(pmemaddr, mapped_len);
    return 0;
}
// Fill the three arrays with the benchmark's start values (a=0, b=2, c=1).
// Fix: loop index is long int to match array_size (an int index overflows
// for arrays longer than INT_MAX elements).
void initialise(double *a, double *b, double *c, long int array_size){
    long int j;
    #pragma omp parallel for
    for (j=0; j<array_size; j++){
        a[j] = 0.0;
        b[j] = 2.0;
        c[j] = 1.0;
    }
    return;
}
// STREAM Copy kernel: b[j] = a[j].  Fix: long int loop index so indexing
// does not overflow when array_size > INT_MAX.
void copy(double *a, double *b, long int array_size){
    long int j;
    #pragma omp parallel for
    for (j=0; j<array_size; j++){
        b[j] = a[j];
    }
    return;
}
// STREAM Scale kernel: b[j] = a[j] * scalar.  Fix: long int loop index so
// indexing does not overflow when array_size > INT_MAX.
void scale(double *a, double *b, double scalar, long int array_size){
    long int j;
    #pragma omp parallel for
    for (j=0; j<array_size; j++){
        b[j] = a[j]*scalar;
    }
    return;
}
// STREAM Add kernel: c[j] = a[j] + b[j].  Fix: long int loop index so
// indexing does not overflow when array_size > INT_MAX.
void add(double *a, double *b, double *c, long int array_size){
    long int j;
    #pragma omp parallel for
    for (j=0; j<array_size; j++){
        c[j] = a[j]+b[j];
    }
    return;
}
// STREAM Triad kernel: c[j] = a[j] + b[j] * scalar.  Fix: long int loop
// index so indexing does not overflow when array_size > INT_MAX.
void triadd(double *a, double *b, double *c, double scalar, long int array_size){
    long int j;
    #pragma omp parallel for
    for (j=0; j<array_size; j++){
        c[j] = a[j]+b[j]*scalar;
    }
    return;
}
|
threading.h | #ifndef LIGHTGBM_UTILS_THREADING_H_
#define LIGHTGBM_UTILS_THREADING_H_
#include <LightGBM/utils/openmp_wrapper.h>
#include <vector>
#include <functional>
namespace LightGBM {
class Threading {
public:
  /*!
   * Split the index range [start, end) into one contiguous chunk per OpenMP
   * thread and run inner_fun(thread_idx, chunk_start, chunk_end) on each
   * chunk in parallel.  Chunks are ceil((end-start)/num_threads) long; the
   * last chunk is clipped to end, and threads whose chunk starts at or past
   * end do no work.
   */
  template<typename INDEX_T>
  static inline void For(INDEX_T start, INDEX_T end, const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) {
    int num_threads = 1;
    // Discover the real team size: omp_get_num_threads() returns 1 when
    // called from serial code, so query it from inside a parallel region.
    #pragma omp parallel
    #pragma omp master
    {
      num_threads = omp_get_num_threads();
    }
    INDEX_T num_inner = (end - start + num_threads - 1) / num_threads;  // ceil division
    if (num_inner <= 0) { num_inner = 1; }
    // schedule(static,1): thread i deterministically gets iteration i,
    // so the chunk index passed to inner_fun matches the executing thread.
    #pragma omp parallel for schedule(static,1)
    for (int i = 0; i < num_threads; ++i) {
      INDEX_T inner_start = start + num_inner * i;
      INDEX_T inner_end = inner_start + num_inner;
      if (inner_end > end) { inner_end = end; }
      if (inner_start < end) {
        inner_fun(i, inner_start, inner_end);
      }
    }
  }
};
} // namespace LightGBM
#endif // LightGBM_UTILS_THREADING_H_
|
matching_coefficients.h | // Copyright (c) 2013-2015 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file matching_coefficients.h
*
* \brief Contains definition and partial implementation of sirius::Matching_coefficients class.
*/
#ifndef __MATCHING_COEFFICIENTS_H__
#define __MATCHING_COEFFICIENTS_H__
namespace sirius {
/** The following matching conditions must be fulfilled:
* \f[
* \frac{\partial^j}{\partial r^j} \sum_{L \nu} A_{L \nu}^{\bf k}({\bf G})u_{\ell \nu}(r)
* Y_{L}(\hat {\bf r}) \bigg|_{R^{MT}} = \frac{\partial^j}{\partial r^j} \frac{4 \pi}{\sqrt \Omega}
* e^{i{\bf (G+k)\tau}} \sum_{L}i^{\ell} j_{\ell}(|{\bf G+k}|r) Y_{L}^{*}(\widehat {\bf G+k}) Y_{L}(\hat {\bf r}) \bigg|_{R^{MT}}
* \f]
* where \f$ L = \{ \ell, m \} \f$. Dropping sum over L we arrive to the following system of linear equations:
* \f[
* \sum_{\nu} \frac{\partial^j u_{\ell \nu}(r)}{\partial r^j} \bigg|_{R^{MT}} A_{L \nu}^{\bf k}({\bf G}) =
* \frac{4 \pi}{\sqrt \Omega} e^{i{\bf (G+k)\tau}} i^{\ell} \frac{\partial^j j_{\ell}(|{\bf G+k}|r)}{\partial r^j}
* \bigg|_{R^{MT}} Y_{L}^{*}(\widehat {\bf G+k})
* \f]
* The matching coefficients are then equal to:
* \f[
* A_{L \nu}^{\bf k}({\bf G}) = \sum_{j} \bigg[ \frac{\partial^j u_{\ell \nu}(r)}{\partial r^j} \bigg|_{R^{MT}} \bigg]_{\nu j}^{-1}
* \frac{\partial^j j_{\ell}(|{\bf G+k}|r)}{\partial r^j} \bigg|_{R^{MT}} \frac{4 \pi}{\sqrt \Omega} i^{\ell}
* e^{i{\bf (G+k)\tau}} Y_{L}^{*}(\widehat {\bf G+k})
* \f]
*/
class Matching_coefficients
{
private:
Unit_cell const& unit_cell_;
int num_gkvec_;
std::vector<int>& igk_;
Gvec const& gkvec_;
mdarray<double_complex, 2> gkvec_ylm_;
std::vector<double> gkvec_len_;
/// Precomputed values for the linear equations for matching coefficients.
mdarray<double_complex, 4> alm_b_;
/// Generate matching coefficients for a specific \f$ \ell \f$ and order.
/** \param [in] ngk Number of G+k vectors.
* \param [in] ia Index of atom.
* \param [in] iat Index of atom type.
* \param [in] l Orbital quantum nuber.
* \param [in] lm Composite l,m index.
* \param [in] nu Order of radial function \f$ u_{\ell \nu}(r) \f$ for which coefficients are generated.
* \param [inout] A Matrix of radial derivatives.
* \param [out] alm Pointer to alm coefficients.
*/
/// Compute the matching coefficients alm for one (l, lm, nu) channel over
/// all ngk G+k vectors.  N (compile-time) is the matching order: the number
/// of radial derivatives matched at the muffin-tin boundary.
template <int N>
inline void generate(int ngk,
                     std::vector<double_complex> const& phase_factors__,
                     int iat,
                     int l,
                     int lm,
                     int nu,
                     matrix3d<double>& A,
                     double_complex* alm) const
{
    /* invert matrix of radial derivatives */
    switch (N) {
        case 1: {
            if (unit_cell_.parameters().control().verification_ > 0) {
                /* a near-zero boundary value makes the 1x1 "inverse" ill-conditioned */
                if (std::abs(A(0, 0)) < 1.0 / std::sqrt(unit_cell_.omega())) {
                    std::stringstream s;
                    s << "Ill defined plane wave matching problem for atom type " << iat << ", l = " << l << std::endl
                      << " radial function value at the MT boundary : " << A(0, 0);
                    WARNING(s.str());
                }
            }
            A(0, 0) = 1.0 / A(0, 0);
            break;
        }
        case 2: {
            /* explicit in-place inverse of the 2x2 system */
            double det = A(0, 0) * A(1, 1) - A(0, 1) * A(1, 0);
            if (unit_cell_.parameters().control().verification_ > 0) {
                if (std::abs(det) < 1.0 / std::sqrt(unit_cell_.omega())) {
                    std::stringstream s;
                    s << "Ill defined plane wave matching problem for atom type " << iat << ", l = " << l << std::endl
                      << " radial function value at the MT boundary : " << A(0 ,0);
                    WARNING(s.str());
                }
            }
            std::swap(A(0, 0), A(1, 1));
            A(0, 0) /= det;
            A(1, 1) /= det;
            A(0, 1) = -A(0, 1) / det;
            A(1, 0) = -A(1, 0) / det;
            break;
        }
        case 3: {
            A = inverse(A);
            break;
        }
    }
    double_complex zt;
    for (int igk = 0; igk < ngk; igk++) {
        /* zt = sum_j A^{-1}(nu, j) * alm_b_(j, igk, l, iat); the switch
         * unrolls the sum for the compile-time order N (for N == 1, nu is
         * necessarily 0, hence A(0, 0)) */
        switch (N) {
            case 1: {
                zt = alm_b_(0, igk, l, iat) * A(0, 0);
                break;
            }
            case 2: {
                zt = alm_b_(0, igk, l, iat) * A(nu, 0) +
                     alm_b_(1, igk, l, iat) * A(nu, 1);
                break;
            }
            case 3: {
                zt = alm_b_(0, igk, l, iat) * A(nu, 0) +
                     alm_b_(1, igk, l, iat) * A(nu, 1) +
                     alm_b_(2, igk, l, iat) * A(nu, 2);
                break;
            }
        }
        /* attach the structure phase factor and conjugated Ylm of G+k */
        alm[igk] = phase_factors__[igk] * std::conj(gkvec_ylm_(igk, lm)) * zt;
    }
}
public:
/// Constructor
Matching_coefficients(Unit_cell const& unit_cell__,
int lmax_apw__,
int num_gkvec__,
std::vector<int>& igk__,
Gvec const& gkvec__)
: unit_cell_(unit_cell__),
num_gkvec_(num_gkvec__),
igk_(igk__),
gkvec_(gkvec__)
{
int lmmax_apw = Utils::lmmax(lmax_apw__);
gkvec_ylm_ = mdarray<double_complex, 2>(num_gkvec_, lmmax_apw);
gkvec_len_.resize(num_gkvec_);
/* get length and Ylm harmonics of G+k vectors */
#pragma omp parallel
{
std::vector<double_complex> ylm(lmmax_apw);
#pragma omp for
for (int i = 0; i < num_gkvec_; i++) {
auto gkvec_cart = gkvec_.gkvec_cart(igk_[i]);
/* get r, theta, phi */
auto vs = SHT::spherical_coordinates(gkvec_cart);
/* get spherical harmonics */
SHT::spherical_harmonics(lmax_apw__, vs[1], vs[2], &ylm[0]);
gkvec_len_[i] = vs[0];
for (int lm = 0; lm < lmmax_apw; lm++) {
gkvec_ylm_(i, lm) = ylm[lm];
}
}
}
alm_b_ = mdarray<double_complex, 4>(3, num_gkvec_, lmax_apw__ + 1, unit_cell_.num_atom_types());
alm_b_.zero();
/* value and first two derivatives of spherical Bessel functions */
mdarray<double, 2> sbessel_mt(lmax_apw__ + 2, 3);
for (int igk = 0; igk < num_gkvec_; igk++) {
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
double R = unit_cell_.atom_type(iat).mt_radius();
double RGk = R * gkvec_len_[igk];
/* compute values and first and second derivatives of the spherical Bessel functions at the MT boundary */
gsl_sf_bessel_jl_array(lmax_apw__ + 1, RGk, &sbessel_mt(0, 0));
/* Bessel function derivative: f_{{n}}^{{\prime}}(z)=-f_{{n+1}}(z)+(n/z)f_{{n}}(z)
*
* In[]:= FullSimplify[D[SphericalBesselJ[n,a*x],{x,1}]]
* Out[]= (n SphericalBesselJ[n,a x])/x-a SphericalBesselJ[1+n,a x]
*
* In[]:= FullSimplify[D[SphericalBesselJ[n,a*x],{x,2}]]
* Out[]= (((-1+n) n-a^2 x^2) SphericalBesselJ[n,a x]+2 a x SphericalBesselJ[1+n,a x])/x^2
*/
for (int l = 0; l <= lmax_apw__; l++) {
sbessel_mt(l, 1) = -sbessel_mt(l + 1, 0) * gkvec_len_[igk] + (l / R) * sbessel_mt(l, 0);
sbessel_mt(l, 2) = 2 * gkvec_len_[igk] * sbessel_mt(l + 1, 0) / R +
((l - 1) * l - std::pow(RGk, 2)) * sbessel_mt(l, 0) / std::pow(R, 2);
}
for (int l = 0; l <= lmax_apw__; l++) {
double_complex z = std::pow(double_complex(0, 1), l);
double f = fourpi / std::sqrt(unit_cell_.omega());
alm_b_(0, igk, l, iat) = z * f * sbessel_mt(l, 0);
alm_b_(1, igk, l, iat) = z * f * sbessel_mt(l, 1);
alm_b_(2, igk, l, iat) = z * f * sbessel_mt(l, 2);
}
}
}
}
/// Generate plane-wave matching coefficents for the radial solutions of a given atom.
/** \param [in] ia Index of atom.
* \param [out] alm Array of matching coefficients with dimension indices \f$ ({\bf G+k}, \xi) \f$.
*/
void generate(int ia, mdarray<double_complex, 2>& alm) const
{
auto& atom = unit_cell_.atom(ia);
auto& type = atom.type();
assert(type.max_aw_order() <= 3);
int iat = type.id();
std::vector<double_complex> phase_factors(num_gkvec_);
for (int i = 0; i < num_gkvec_; i++) {
double phase = twopi * dot(gkvec_.gkvec(igk_[i]), unit_cell_.atom(ia).position());
phase_factors[i] = std::exp(double_complex(0, phase));
}
matrix3d<double> A;
for (int xi = 0; xi < type.mt_aw_basis_size(); xi++) {
int l = type.indexb(xi).l;
int lm = type.indexb(xi).lm;
int nu = type.indexb(xi).order;
/* order of augmentation for a given orbital quantum number */
int num_aw = static_cast<int>(type.aw_descriptor(l).size());
/* create matrix of radial derivatives */
for (int order = 0; order < num_aw; order++) {
for (int dm = 0; dm < num_aw; dm++) {
A(dm, order) = atom.symmetry_class().aw_surface_dm(l, order, dm);
}
}
switch (num_aw) {
/* APW */
case 1: {
generate<1>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi));
break;
}
/* LAPW */
case 2: {
generate<2>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi));
break;
}
/* Super LAPW */
case 3: {
generate<3>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi));
break;
}
default: {
TERMINATE("wrong order of augmented wave");
}
}
}
}
};
}
#endif // __MATCHING_COEFFICIENTS_H__
|
omptarget.h | //===---- omptarget.h - OpenMP GPU initialization ---------------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of all library macros, types,
// and functions.
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_H
#define OMPTARGET_H
#include "common/allocator.h"
#include "common/debug.h" // debug
#include "common/state-queue.h"
#include "common/support.h"
#include "interface.h" // interfaces with omp, compiler, and user
#include "target_impl.h"
#define OMPTARGET_NVPTX_VERSION 1.1
// used by the library for the interface with the app
#define DISPATCH_FINISHED 0
#define DISPATCH_NOTFINISHED 1
// used by dynamic scheduling
#define FINISHED 0
#define NOT_FINISHED 1
#define LAST_CHUNK 2
#define BARRIER_COUNTER 0
#define ORDERED_COUNTER 1
// arguments needed for L0 parallelism only.
// Argument buffer used to hand the outlined parallel function's arguments
// from the master thread to the workers (L0 parallelism only).
class omptarget_nvptx_SharedArgs {
public:
  // All these methods must be called by the master thread only.
  // Reset to the statically pre-allocated 'buffer'.
  INLINE void Init() {
    args = buffer;
    nArgs = MAX_SHARED_ARGS;
  }
  INLINE void DeInit() {
    // Free any memory allocated for outlined parallel function with a large
    // number of arguments.
    if (nArgs > MAX_SHARED_ARGS) {
      SafeFree(args, "new extended args");
      Init();
    }
  }
  // Grow the argument array to hold at least 'size' entries.
  // NOTE(review): the old contents are not copied into the new allocation —
  // presumably callers rewrite all arguments after this call; confirm.
  INLINE void EnsureSize(size_t size) {
    if (size > nArgs) {
      if (nArgs > MAX_SHARED_ARGS) {
        SafeFree(args, "new extended args");
      }
      args = (void **)SafeMalloc(size * sizeof(void *), "new extended args");
      nArgs = size;
    }
  }
  // Called by all threads.
  INLINE void **GetArgs() const { return args; };
private:
  // buffer of pre-allocated arguments.
  void *buffer[MAX_SHARED_ARGS];
  // pointer to arguments buffer.
  // starts off as a pointer to 'buffer' but can be dynamically allocated.
  void **args;
  // starts off as MAX_SHARED_ARGS but can increase in size.
  uint32_t nArgs;
};
extern DEVICE
omptarget_nvptx_SharedArgs EXTERN_SHARED(omptarget_nvptx_globalArgs);
// Worker slot type which is initialized with the default worker slot
// size of 4*32 bytes.
// One slot in the doubly-linked list of data-sharing stack segments.
struct __kmpc_data_sharing_slot {
  __kmpc_data_sharing_slot *Next; // next slot in the chain (or null)
  __kmpc_data_sharing_slot *Prev; // previous slot in the chain (or null)
  void *PrevSlotStackPtr;         // stack pointer saved when this slot was entered
  void *DataEnd;                  // one past the last usable byte of Data
  char Data[DS_Worker_Warp_Slot_Size]; // payload storage for shared variables
};
// Data structure to keep in shared memory that traces the current slot, stack,
// and frame pointer as well as the active threads that didn't exit the current
// environment.
// Per-warp bookkeeping for the data-sharing stack; one entry per warp.
struct DataSharingStateTy {
  __kmpc_data_sharing_slot *SlotPtr[DS_Max_Warp_Number]; // current slot per warp
  void *StackPtr[DS_Max_Warp_Number];                    // current stack top per warp
  void * volatile FramePtr[DS_Max_Warp_Number];          // current frame base per warp
  __kmpc_impl_lanemask_t ActiveThreads[DS_Max_Warp_Number]; // lanes still in the environment
};
extern DEVICE DataSharingStateTy EXTERN_SHARED(DataSharingState);
////////////////////////////////////////////////////////////////////////////////
// task ICV and (implicit & explicit) task state
// Task descriptor holding the ICVs and state of one (implicit or explicit)
// task; descriptors form a singly-linked stack via 'prev'.
class omptarget_nvptx_TaskDescr {
public:
  // methods for flags
  INLINE omp_sched_t GetRuntimeSched() const;
  INLINE void SetRuntimeSched(omp_sched_t sched);
  // nonzero if this task is inside at least one parallel region
  INLINE int InParallelRegion() const { return items.flags & TaskDescr_InPar; }
  // nonzero if this task is inside an L2-or-deeper parallel region
  INLINE int InL2OrHigherParallelRegion() const {
    return items.flags & TaskDescr_InParL2P;
  }
  // nonzero if this descriptor was created for a parallel construct
  INLINE int IsParallelConstruct() const {
    return items.flags & TaskDescr_IsParConstr;
  }
  INLINE int IsTaskConstruct() const { return !IsParallelConstruct(); }
  // methods for other fields
  INLINE uint16_t &ThreadId() { return items.threadId; }
  INLINE uint64_t &RuntimeChunkSize() { return items.runtimeChunkSize; }
  INLINE omptarget_nvptx_TaskDescr *GetPrevTaskDescr() const { return prev; }
  INLINE void SetPrevTaskDescr(omptarget_nvptx_TaskDescr *taskDescr) {
    prev = taskDescr;
  }
  // init & copy
  INLINE void InitLevelZeroTaskDescr();
  INLINE void InitLevelOneTaskDescr(omptarget_nvptx_TaskDescr *parentTaskDescr);
  INLINE void Copy(omptarget_nvptx_TaskDescr *sourceTaskDescr);
  INLINE void CopyData(omptarget_nvptx_TaskDescr *sourceTaskDescr);
  INLINE void CopyParent(omptarget_nvptx_TaskDescr *parentTaskDescr);
  INLINE void CopyForExplicitTask(omptarget_nvptx_TaskDescr *parentTaskDescr);
  INLINE void CopyToWorkDescr(omptarget_nvptx_TaskDescr *masterTaskDescr);
  INLINE void CopyFromWorkDescr(omptarget_nvptx_TaskDescr *workTaskDescr);
  INLINE void CopyConvergentParent(omptarget_nvptx_TaskDescr *parentTaskDescr,
                                   uint16_t tid, uint16_t tnum);
  INLINE void SaveLoopData();
  INLINE void RestoreLoopData() const;
private:
  // bits for flags: (6 used, 2 free)
  //   3 bits (SchedMask) for runtime schedule
  //   1 bit (InPar) if this thread has encountered one or more parallel region
  //   1 bit (IsParConstr) if ICV for a parallel region (false = explicit task)
  //   1 bit (InParL2+) if this thread has encountered L2 or higher parallel
  //   region
  static const uint8_t TaskDescr_SchedMask = (0x1 | 0x2 | 0x4);
  static const uint8_t TaskDescr_InPar = 0x10;
  static const uint8_t TaskDescr_IsParConstr = 0x20;
  static const uint8_t TaskDescr_InParL2P = 0x40;
  // snapshot of the loop state, filled by SaveLoopData/RestoreLoopData
  struct SavedLoopDescr_items {
    int64_t loopUpperBound;
    int64_t nextLowerBound;
    int64_t chunk;
    int64_t stride;
    kmp_sched_t schedule;
  } loopData;
  struct TaskDescr_items {
    uint8_t flags; // 6 bit used (see flag above)
    uint8_t unused;
    uint16_t threadId;         // thread id
    uint64_t runtimeChunkSize; // runtime chunk size
  } items;
  omptarget_nvptx_TaskDescr *prev; // enclosing task descriptor (stack link)
};
// build on kmp
// Pairing of the NVPTX task descriptor with the kmp task descriptor for
// explicit tasks; field order is load-bearing (see comments).
typedef struct omptarget_nvptx_ExplicitTaskDescr {
  omptarget_nvptx_TaskDescr
      taskDescr; // omptarget_nvptx task description (must be first)
  kmp_TaskDescr kmpTaskDescr; // kmp task description (must be last)
} omptarget_nvptx_ExplicitTaskDescr;
////////////////////////////////////////////////////////////////////////////////
// Descriptor of a parallel region (worksharing in general)
// Descriptor of a parallel region (worksharing in general); wraps the
// master's task ICVs for the active parallel region.
class omptarget_nvptx_WorkDescr {
public:
  // access to data
  INLINE omptarget_nvptx_TaskDescr *WorkTaskDescr() { return &masterTaskICV; }
private:
  omptarget_nvptx_TaskDescr masterTaskICV; // ICVs the workers copy from
};
////////////////////////////////////////////////////////////////////////////////
// Per-team state: the level-zero task ICVs, the active parallel-region work
// descriptor, and one pre-allocated data-sharing root slot per warp.
class omptarget_nvptx_TeamDescr {
public:
  // access to data
  INLINE omptarget_nvptx_TaskDescr *LevelZeroTaskDescr() {
    return &levelZeroTaskDescr;
  }
  INLINE omptarget_nvptx_WorkDescr &WorkDescr() {
    return workDescrForActiveParallel;
  }
  // init
  INLINE void InitTeamDescr();
  // Re-initialize and return warp 'wid's pre-allocated root slot.
  INLINE __kmpc_data_sharing_slot *GetPreallocatedSlotAddr(int wid) {
    worker_rootS[wid].DataEnd =
        &worker_rootS[wid].Data[0] + DS_Worker_Warp_Slot_Size;
    // We currently do not have a next slot.
    worker_rootS[wid].Next = 0;
    worker_rootS[wid].Prev = 0;
    worker_rootS[wid].PrevSlotStackPtr = 0;
    return (__kmpc_data_sharing_slot *)&worker_rootS[wid];
  }
private:
  omptarget_nvptx_TaskDescr
      levelZeroTaskDescr; // icv for team master initial thread
  omptarget_nvptx_WorkDescr
      workDescrForActiveParallel; // one, ONLY for the active par
  ALIGN(16)
  __kmpc_data_sharing_slot worker_rootS[DS_Max_Warp_Number];
};
////////////////////////////////////////////////////////////////////////////////
// thread private data (struct of arrays for better coalescing)
// tid refers here to the global thread id
// do not support multiple concurrent kernels at this time
// thread private data (struct of arrays for better coalescing)
// tid refers here to the global thread id
// do not support multiple concurrent kernels at this time
class omptarget_nvptx_ThreadPrivateContext {
public:
  // task
  INLINE omptarget_nvptx_TaskDescr *Level1TaskDescr(int tid) {
    return &levelOneTaskDescr[tid];
  }
  INLINE void SetTopLevelTaskDescr(int tid,
                                   omptarget_nvptx_TaskDescr *taskICV) {
    topTaskDescr[tid] = taskICV;
  }
  INLINE omptarget_nvptx_TaskDescr *GetTopLevelTaskDescr(int tid) const;
  // parallel: thread count requested for the next parallel region
  INLINE uint16_t &NumThreadsForNextParallel(int tid) {
    return nextRegion.tnum[tid];
  }
  // schedule (for dispatch)
  INLINE kmp_sched_t &ScheduleType(int tid) { return schedule[tid]; }
  INLINE int64_t &Chunk(int tid) { return chunk[tid]; }
  INLINE int64_t &LoopUpperBound(int tid) { return loopUpperBound[tid]; }
  INLINE int64_t &NextLowerBound(int tid) { return nextLowerBound[tid]; }
  INLINE int64_t &Stride(int tid) { return stride[tid]; }
  INLINE omptarget_nvptx_TeamDescr &TeamContext() { return teamContext; }
  INLINE void InitThreadPrivateContext(int tid);
  INLINE uint64_t &Cnt() { return cnt; }
private:
  // team context for this team
  omptarget_nvptx_TeamDescr teamContext;
  // task ICV for implicit threads in the only parallel region
  omptarget_nvptx_TaskDescr levelOneTaskDescr[MAX_THREADS_PER_TEAM];
  // pointer where to find the current task ICV (top of the stack)
  omptarget_nvptx_TaskDescr *topTaskDescr[MAX_THREADS_PER_TEAM];
  union {
    // Only one of the two is live at the same time.
    // parallel
    uint16_t tnum[MAX_THREADS_PER_TEAM];
  } nextRegion;
  // schedule (for dispatch)
  kmp_sched_t schedule[MAX_THREADS_PER_TEAM]; // remember schedule type for #for
  int64_t chunk[MAX_THREADS_PER_TEAM];
  int64_t loopUpperBound[MAX_THREADS_PER_TEAM];
  // state for dispatch with dyn/guided OR static (never use both at a time)
  int64_t nextLowerBound[MAX_THREADS_PER_TEAM];
  int64_t stride[MAX_THREADS_PER_TEAM];
  uint64_t cnt; // shared dispatch counter
};
/// Memory manager for statically allocated memory.
class omptarget_nvptx_SimpleMemoryManager {
private:
struct MemDataTy {
volatile unsigned keys[OMP_STATE_COUNT];
} MemData[MAX_SM] ALIGN(128);
INLINE static uint32_t hash(unsigned key) {
return key & (OMP_STATE_COUNT - 1);
}
public:
INLINE void Release();
INLINE const void *Acquire(const void *buf, size_t size);
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// global data tables
////////////////////////////////////////////////////////////////////////////////
extern DEVICE omptarget_nvptx_SimpleMemoryManager
omptarget_nvptx_simpleMemoryManager;
extern DEVICE uint32_t EXTERN_SHARED(usedMemIdx);
extern DEVICE uint32_t EXTERN_SHARED(usedSlotIdx);
#if _OPENMP
extern DEVICE uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE];
#pragma omp allocate(parallelLevel) allocator(omp_pteam_mem_alloc)
#else
extern DEVICE
uint8_t EXTERN_SHARED(parallelLevel)[MAX_THREADS_PER_TEAM / WARPSIZE];
#endif
extern DEVICE uint16_t EXTERN_SHARED(threadLimit);
extern DEVICE uint16_t EXTERN_SHARED(threadsInTeam);
extern DEVICE uint16_t EXTERN_SHARED(nThreads);
extern DEVICE omptarget_nvptx_ThreadPrivateContext *
EXTERN_SHARED(omptarget_nvptx_threadPrivateContext);
extern DEVICE uint32_t EXTERN_SHARED(execution_param);
extern DEVICE void *EXTERN_SHARED(ReductionScratchpadPtr);
////////////////////////////////////////////////////////////////////////////////
// work function (outlined parallel/simd functions) and arguments.
// needed for L1 parallelism only.
////////////////////////////////////////////////////////////////////////////////
typedef void *omptarget_nvptx_WorkFn;
extern volatile DEVICE
omptarget_nvptx_WorkFn EXTERN_SHARED(omptarget_nvptx_workFn);
////////////////////////////////////////////////////////////////////////////////
// get private data structures
////////////////////////////////////////////////////////////////////////////////
INLINE omptarget_nvptx_TeamDescr &getMyTeamDescriptor();
INLINE omptarget_nvptx_WorkDescr &getMyWorkDescriptor();
INLINE omptarget_nvptx_TaskDescr *
getMyTopTaskDescriptor(bool isSPMDExecutionMode);
INLINE omptarget_nvptx_TaskDescr *getMyTopTaskDescriptor(int globalThreadId);
////////////////////////////////////////////////////////////////////////////////
// inlined implementation
////////////////////////////////////////////////////////////////////////////////
#include "common/omptargeti.h"
#endif
|
image_pyramid.h | /*
*
* This file is part of the open-source SeetaFace engine, which includes three modules:
* SeetaFace Detection, SeetaFace Alignment, and SeetaFace Identification.
*
* This file is part of the SeetaFace Detection module, containing codes implementing the
* face detection method described in the following paper:
*
*
* Funnel-structured cascade for multi-view face detection with alignment awareness,
* Shuzhe Wu, Meina Kan, Zhenliang He, Shiguang Shan, Xilin Chen.
* In Neurocomputing (under review)
*
*
* Copyright (C) 2016, Visual Information Processing and Learning (VIPL) group,
* Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China.
*
* The codes are mainly developed by Shuzhe Wu (a Ph.D supervised by Prof. Shiguang Shan)
*
* As an open-source face recognition engine: you can redistribute SeetaFace source codes
* and/or modify it under the terms of the BSD 2-Clause License.
*
* You should have received a copy of the BSD 2-Clause License along with the software.
* If not, see < https://opensource.org/licenses/BSD-2-Clause>.
*
* Contact Info: you can send an email to SeetaFace@vipl.ict.ac.cn for any problems.
*
* Note: the above information must be kept whenever or wherever the codes are used.
*
*/
#ifndef SEETA_FD_UTIL_IMAGE_PYRAMID_H_
#define SEETA_FD_UTIL_IMAGE_PYRAMID_H_
#include <cstdint>
#include <string>
#include <cstring>
#include "common.h"
#include "SeetaCudaMath.h"
namespace seeta {
namespace fd {
/* Bilinear resize of a single-channel 8-bit image from src into dest.
 * When the sizes already match, the data is copied verbatim. Large targets
 * (> 45000 pixels) are resized on the GPU; smaller ones on the CPU with
 * OpenMP. Assumes dest->data is pre-allocated to dest->width * dest->height.
 *
 * Fix: removed the unused 'static int time' counter (it was only referenced
 * by a commented-out debug printf, and a mutable function-local static is
 * unsafe if this header is included from multiple threads). */
static void ResizeImage(const seeta::ImageData & src, seeta::ImageData* dest) {
  int32_t src_width = src.width;
  int32_t src_height = src.height;
  int32_t dest_width = dest->width;
  int32_t dest_height = dest->height;

  if (src_width == dest_width && src_height == dest_height) {
    std::memcpy(dest->data, src.data, src_width * src_height * sizeof(uint8_t));
    return;
  }

  /* scale from destination pixel coordinates back to source coordinates */
  double lf_x_scl = static_cast<double>(src_width) / dest_width;
  double lf_y_scl = static_cast<double>(src_height) / dest_height;
  uint8_t* src_data = src.data;
  uint8_t* dest_data = dest->data;

  if (dest_height * dest_width > 45000)
  {
    SeetaCudaMath::resizeImgGpu(0, src_data, src_width, src_height, dest_data, dest_width, dest_height);
  }
  else
  {
#pragma omp parallel num_threads(SEETA_NUM_THREADS)
    {
#pragma omp for nowait
      for (int32_t y = 0; y < dest_height; y++) {
        for (int32_t x = 0; x < dest_width; x++) {
          double lf_x_s = lf_x_scl * x;
          double lf_y_s = lf_y_scl * y;
          /* clamp so the 2x2 sampling neighbourhood stays inside the image */
          int32_t n_x_s = static_cast<int>(lf_x_s);
          n_x_s = (n_x_s <= (src_width - 2) ? n_x_s : (src_width - 2));
          int32_t n_y_s = static_cast<int>(lf_y_s);
          n_y_s = (n_y_s <= (src_height - 2) ? n_y_s : (src_height - 2));

          /* bilinear interpolation weights */
          double lf_weight_x = lf_x_s - n_x_s;
          double lf_weight_y = lf_y_s - n_y_s;

          double dest_val = (1 - lf_weight_y) * ((1 - lf_weight_x) *
              src_data[n_y_s * src_width + n_x_s] +
              lf_weight_x * src_data[n_y_s * src_width + n_x_s + 1]) +
              lf_weight_y * ((1 - lf_weight_x) * src_data[(n_y_s + 1) * src_width + n_x_s] +
              lf_weight_x * src_data[(n_y_s + 1) * src_width + n_x_s + 1]);

          dest_data[y * dest_width + x] = static_cast<uint8_t>(dest_val);
        }
      }
    }
  }
}
// Image pyramid that yields successively down-scaled versions of a base
// image, from max_scale_ down to min_scale_ in multiplicative steps of
// scale_step_. Owns two raw buffers: the 1x image and the scaled image.
// SetImage1x/GetNextScaleImage/UpdateBufScaled are defined out of line.
class ImagePyramid {
 public:
  ImagePyramid()
      : max_scale_(1.0f), min_scale_(1.0f),
      scale_factor_(1.0f), scale_step_(0.8f),
      width1x_(0), height1x_(0),
      width_scaled_(0), height_scaled_(0),
      buf_img_width_(2), buf_img_height_(2),
      buf_scaled_width_(2), buf_scaled_height_(2) {
    // start with minimal 2x2 buffers; grown on demand by the setters
    buf_img_ = new uint8_t[buf_img_width_ * buf_img_height_];
    buf_img_scaled_ = new uint8_t[buf_scaled_width_ * buf_scaled_height_];
  }

  ~ImagePyramid() {
    delete[] buf_img_;
    buf_img_ = nullptr;
    buf_img_width_ = 0;
    buf_img_height_ = 0;

    delete[] buf_img_scaled_;
    buf_img_scaled_ = nullptr;
    buf_scaled_width_ = 0;
    buf_scaled_height_ = 0;

    // img_scaled_ only aliased the buffer; clear the dangling view
    img_scaled_.data = nullptr;
    img_scaled_.width = 0;
    img_scaled_.height = 0;
  }

  // Set the multiplicative scale step; values outside (0, 1] are ignored.
  inline void SetScaleStep(float step) {
    if (step > 0.0f && step <= 1.0f)
      scale_step_ = step;
  }

  inline void SetMinScale(float min_scale) {
    min_scale_ = min_scale;
  }

  // Setting the max scale also resets the iteration to start from it and
  // resizes the scaled-image buffer accordingly.
  inline void SetMaxScale(float max_scale) {
    max_scale_ = max_scale;
    scale_factor_ = max_scale;
    UpdateBufScaled();
  }

  void SetImage1x(const uint8_t* img_data, int32_t width, int32_t height);

  inline float min_scale() const { return min_scale_; }
  inline float max_scale() const { return max_scale_; }

  // Non-owning view of the base (1x) image; data points into buf_img_.
  inline seeta::ImageData image1x() {
    seeta::ImageData img(width1x_, height1x_, 1);
    img.data = buf_img_;
    return img;
  }

  const seeta::ImageData* GetNextScaleImage(float* scale_factor = nullptr);

 private:
  void UpdateBufScaled();

  float max_scale_;    // first (largest) scale produced
  float min_scale_;    // iteration stops below this scale
  float scale_factor_; // scale of the next image to be produced
  float scale_step_;   // multiplicative step between successive scales

  int32_t width1x_;       // base image width
  int32_t height1x_;      // base image height
  int32_t width_scaled_;  // current scaled image width
  int32_t height_scaled_; // current scaled image height

  uint8_t* buf_img_;       // owned storage for the base image
  int32_t buf_img_width_;  // capacity (width) of buf_img_
  int32_t buf_img_height_; // capacity (height) of buf_img_

  uint8_t* buf_img_scaled_;   // owned storage for the scaled image
  int32_t buf_scaled_width_;  // capacity (width) of buf_img_scaled_
  int32_t buf_scaled_height_; // capacity (height) of buf_img_scaled_

  seeta::ImageData img_scaled_; // view returned by GetNextScaleImage
};
} // namespace fd
} // namespace seeta
#endif // SEETA_FD_UTIL_IMAGE_PYRAMID_H_
|
Example_critical.1.c | /*
* @@name: critical.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
int dequeue(float *a);
void work(int i, float *a);

/* Each thread pulls items from the two shared queues and processes them.
 * The named critical sections serialize access per queue: dequeues from x
 * are mutually exclusive, dequeues from y are mutually exclusive, but a
 * dequeue from x may proceed concurrently with a dequeue from y. */
void critical_example(float *x, float *y)
{
  int item_x, item_y;

#pragma omp parallel shared(x, y) private(item_x, item_y)
  {
#pragma omp critical (xaxis)
    item_x = dequeue(x);
    work(item_x, x);

#pragma omp critical (yaxis)
    item_y = dequeue(y);
    work(item_y, y);
  }
}
|
md5_broken_fmt_plug.c | /* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Copyright magnum 2013,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mdb;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mdb);
#else
#include <string.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "arch.h"
#include "md5.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#define FORMAT_LABEL "md5-broken"
#define FORMAT_NAME "Broken MD5"
#define FORMAT_TAG "$md5-broken$"
#define TAG_LENGTH 12
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 32
#define SALT_SIZE 0
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests md5_broken_tests[] = {
{"5a105e8b9d40e1329780d62ea2265d8a", "test1"},
{"?a105e8b9d40e1329780d62ea2265d8a", "test1"},
{"??????8b9d40e1329780d62ea2265d8a", "test1"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Format initialization: size the candidate-key arrays. Under OpenMP the
 * key counts are scaled by the thread count (times OMP_SCALE) so that
 * crypt_all() has enough work per thread. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* zero-initialized, never freed (tiny-allocation pool) */
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt,
			MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) *
			self->params.max_keys_per_crypt,
			MEM_ALIGN_WORD);
}
/* Write the hex representation of 'len' bytes from 'str' into 'out'.
 * Produces exactly 2 * len characters; no NUL terminator is appended. */
static inline void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	unsigned char *end = str + len;

	while (str < end) {
		*out++ = itoa16[*str >> 4];
		*out++ = itoa16[*str & 0xF];
		str++;
	}
}
/* Accept hashes of 32 or 64 characters, with or without the format tag. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;
	size_t len;

	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	len = strlen(p);
	return len == 32 || len == 64;
}
/* Copy the hash portion of the ciphertext into a static binary buffer.
 *
 * Fix: the original used strcpy() into a BINARY_SIZE + 1 (33-byte) buffer,
 * but valid() accepts inputs of up to 64 characters, so a 64-character hash
 * overflowed the buffer. Copy at most BINARY_SIZE bytes and NUL-terminate;
 * the cmp_* routines only examine the first BINARY_SIZE bytes, so the
 * truncation does not change any comparison result. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE + 1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	size_t len;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;
	len = strlen(p);
	if (len > BINARY_SIZE)
		len = BINARY_SIZE;
	memcpy(out, p, len);
	out[len] = 0;
	return out;
}
/* Hash all queued candidate keys: MD5 each saved_key and store its hex
 * encoding in crypt_out. Parallelized per candidate under OpenMP. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	/* without OpenMP and with a single key per crypt, the loop collapses
	 * to one iteration with index == 0 */
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index++)
#endif
	{
		unsigned char hash[16];
		MD5_CTX ctx;
		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		MD5_Final(hash, &ctx);
		/* store the ASCII-hex digest (32 chars), which is what the
		 * wildcard-aware cmp_* routines compare against */
		hex_encode(hash, 16, (unsigned char*)crypt_out[index]);
	}
	return count;
}
/* Return nonzero if any computed hash matches the binary. A '?' byte in the
 * binary matches any character — this wildcard is what makes the format
 * "broken" MD5.
 *
 * Fix: the original kept scanning all remaining candidates after a match
 * was already found; return on the first match instead. The return value
 * is unchanged for every input. */
static int cmp_all(void *binary, int count)
{
	int index, i;

	for (index = 0; index < count; index++) {
		unsigned char *p = (unsigned char*)binary;
		unsigned char *q = (unsigned char*)crypt_out[index];
		int matched = 1;

		for (i = 0; i < BINARY_SIZE; i++) {
			if (p[i] != q[i] && p[i] != '?') {
				matched = 0;
				break;
			}
		}
		if (matched)
			return 1;
	}
	return 0;
}
/* Wildcard-aware comparison of one candidate's hex digest against the
 * binary: a '?' byte in the binary matches anything. */
static int cmp_one(void *binary, int index)
{
	unsigned char *p = (unsigned char*)binary;
	unsigned char *q = (unsigned char*)crypt_out[index];
	int i = 0;

	while (i < BINARY_SIZE) {
		if (p[i] != q[i] && p[i] != '?')
			return 0;
		i++;
	}
	return 1;
}
/* With '?' wildcards in the stored hash, an exact check against the source
 * ciphertext is not meaningful; always report a match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store candidate plaintext 'key' at slot 'index', truncated to
 * PLAINTEXT_LENGTH and always NUL-terminated. */
static void set_key(char *key, int index)
{
	int len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored plaintext for slot 'index' (NUL-terminated by set_key). */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with John the Ripper: static parameters
 * followed by the method table wiring in the functions above. */
struct fmt_main fmt_mdb = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		DEFAULT_ALIGN,
		SALT_SIZE,
		DEFAULT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		md5_broken_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		/* no salt in this format (SALT_SIZE is 0) */
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			/* default binary hashing (wildcards defeat fast hashes) */
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
DRB061-matrixvector1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Matrix-vector multiplication: outer-level loop parallelization
*/
#define N 100
double a[N][N],v[N],v_out[N];
/* Initialize the matrix a[][] and the vectors v and v_out.
 *
 * Fix: the original wrote "v_out[i] = i * j;" (and v[i] likewise) right
 * after an inner "#pragma omp parallel for private(j)" loop. Since j was
 * private to both loop constructs, its value at that point was
 * indeterminate, and reading it is undefined behavior. In the serial
 * equivalent j == N after the inner loop, so N is used explicitly. The
 * redundant nested parallel region is dropped — the outer loop already
 * distributes the rows. The unused variable k is removed. */
int init()
{
  int i, j;
#pragma omp parallel for private(i, j)
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      a[i][j] = i * j;
    }
    v_out[i] = i * N;
    v[i] = i * N;
  }
  return 0;
}
/* v_out = a * v: matrix-vector product. The outer loop over rows is
 * parallelized; the inner dot product is expressed as a reduction (the
 * nested parallel for is inert unless nested parallelism is enabled).
 * NOTE(review): 'sum' is float while a and v are double, so each dot
 * product accumulates with reduced precision — presumably intentional for
 * this benchmark; confirm before changing. */
int mv()
{
  int i,j;
#pragma omp parallel for private(i, j)
  for (i = 0; i < N; i++)
  {
    float sum = 0.0;
#pragma omp parallel for private(j) reduction(+:sum)
    for (j = 0; j < N; j++)
    {
      sum += a[i][j]*v[j];
    }
    v_out[i] = sum;
  }
  return 0;
}
/* Dump the matrix and both vectors, one value per line.
 * NOTE(review): no #include <stdio.h> is visible anywhere in this file, so
 * printf is implicitly declared (an error in C99+) — add the include at the
 * top of the file. The variable k is unused. */
int print()
{
  int i,j,k;
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      printf("%lf\n", a[i][j]);
    }
    printf("%lf\n",v_out[i]);
    printf("%lf\n",v[i]);
  }
  return 0;
}
/* Driver: initialize the data, run the parallel matrix-vector product,
 * then print the results for inspection. */
int main()
{
  init();
  mv();
  print();
  return 0;
}
|
buggy_version.c |
int array[1000];

/* NOTE(review): this is a code fragment, not a complete translation unit —
 * the parallel region sits at file scope, 'bool' is used without
 * <stdbool.h>, and rand/some_function/some_condition are undeclared. */
#pragma omp parallel
{
    /* Each thread loops, updating a random slot of 'array' under a critical
     * section (the critical section makes the array updates mutually
     * exclusive across threads). */
    bool flag = true;
    while(flag){
        int x = rand()%1000;
#pragma omp critical
        {
            array[x] = some_function(array[x]);
            /* NOTE(review): 'flag' is declared inside the parallel region and
             * is therefore private — a thread that hits the condition stops
             * only itself, not the team. Given the file name this may be the
             * intended bug; verify against the corrected variant. */
            if (some_condition(array[x])){
                flag = false;
            }
        }
    }
}
|
GB_unop__identity_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_fp64)
// op(A') function: GB (_unop_tran__identity_uint8_fp64)
// C type: uint8_t
// A type: double
// cast: uint8_t cij = GB_cast_to_uint8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): apply the identity operator with a double -> uint8_t
 * typecast to all anz entries, in parallel over nthreads.
 * Ab, if non-NULL, is the bitmap marking which entries of Ax are present.
 * (File is auto-generated; the logic is driven by the GB_* macros above.) */
GrB_Info GB (_unop_apply__identity_uint8_fp64)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            double aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose, typecast double -> uint8_t, and apply the
 * identity operator. The actual work is in the shared template
 * GB_unop_transpose.c, specialized by the GB_* macros defined above. */
GrB_Info GB (_unop_tran__identity_uint8_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-workspace transpose buffers
    const int64_t *restrict A_slice, // partition of A's entries over tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
labyrinth.c | /* =============================================================================
*
* labyrinth.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "list.h"
#include "maze.h"
#include "router.h"
#include "thread.h"
#include "timer.h"
#include "types.h"
enum param_types {
PARAM_BENDCOST = (unsigned char)'b',
PARAM_THREAD = (unsigned char)'t',
PARAM_XCOST = (unsigned char)'x',
PARAM_YCOST = (unsigned char)'y',
PARAM_ZCOST = (unsigned char)'z',
};
enum param_defaults {
PARAM_DEFAULT_BENDCOST = 1,
PARAM_DEFAULT_THREAD = 1,
PARAM_DEFAULT_XCOST = 1,
PARAM_DEFAULT_YCOST = 1,
PARAM_DEFAULT_ZCOST = 2,
};
bool_t global_doPrint = FALSE;
char* global_inputFile = NULL;
long global_params[256]; /* 256 = ascii limit */
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
    /* Print the option summary (with compiled-in defaults) and exit(1). */
    printf("Usage: %s [options]\n", appName);
    puts("\nOptions: (defaults)\n");
    printf(" b <INT> [b]end cost (%i)\n", PARAM_DEFAULT_BENDCOST);
    /*
     * global_inputFile defaults to NULL; passing NULL to printf's "%s"
     * is undefined behavior, so substitute a visible placeholder.
     */
    printf(" i <FILE> [i]nput file name (%s)\n",
           (global_inputFile != NULL) ? global_inputFile : "(null)");
    printf(" p [p]rint routed maze (false)\n");
    printf(" t <UINT> Number of [t]hreads (%i)\n", PARAM_DEFAULT_THREAD);
    printf(" x <UINT> [x] movement cost (%i)\n", PARAM_DEFAULT_XCOST);
    printf(" y <UINT> [y] movement cost (%i)\n", PARAM_DEFAULT_YCOST);
    printf(" z <UINT> [z] movement cost (%i)\n", PARAM_DEFAULT_ZCOST);
    exit(1);
}
/* =============================================================================
* setDefaultParams
* =============================================================================
*/
static void
setDefaultParams ()
{
    /* Install the compiled-in default value for every tunable parameter. */
    static const struct {
        unsigned char key;   /* index into global_params (option letter) */
        long          value; /* default for that option */
    } defaults[] = {
        { PARAM_BENDCOST, PARAM_DEFAULT_BENDCOST },
        { PARAM_THREAD,   PARAM_DEFAULT_THREAD   },
        { PARAM_XCOST,    PARAM_DEFAULT_XCOST    },
        { PARAM_YCOST,    PARAM_DEFAULT_YCOST    },
        { PARAM_ZCOST,    PARAM_DEFAULT_ZCOST    },
    };
    size_t k;

    for (k = 0; k < sizeof(defaults) / sizeof(defaults[0]); k++) {
        global_params[defaults[k].key] = defaults[k].value;
    }
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
static void
parseArgs (long argc, char* const argv[])
{
    /*
     * Parse command-line options into the global_params table and the
     * global input-file/print flags.  Any unknown option or stray
     * non-option argument triggers the usage screen (which exits).
     */
    long n;
    long opt;

    opterr = 0;
    setDefaultParams();

    while ((opt = getopt(argc, argv, "b:i:pt:x:y:z:")) != -1) {
        if (opt == 'i') {
            global_inputFile = optarg;
        } else if (opt == 'p') {
            global_doPrint = TRUE;
        } else if (opt == 'b' || opt == 't' || opt == 'x' ||
                   opt == 'y' || opt == 'z') {
            /* Numeric options are stored keyed by their option letter. */
            global_params[(unsigned char)opt] = atol(optarg);
        } else {
            /* '?' or anything unexpected counts as a usage error. */
            opterr++;
        }
    }

    for (n = optind; n < argc; n++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[n]);
        opterr++;
    }

    if (opterr) {
        displayUsage(argv[0]);
    }
}
/* =============================================================================
* main
* =============================================================================
*/
/*
 * Program entry point.  MAIN, GOTO_REAL/GOTO_SIM, TM_STARTUP, TIMER_*,
 * etc. are macros presumably supplied by the STAMP/TM harness headers
 * (thread.h / types.h) -- confirm against the build's tm.h; they expand
 * differently for simulator vs. native runs and per STM backend.
 */
MAIN(argc, argv)
{
GOTO_REAL();
/*
* Initialization
*/
parseArgs(argc, (char** const)argv);
long numThread = global_params[PARAM_THREAD];
SIM_GET_NUM_CPU(numThread);
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
maze_t* mazePtr = maze_alloc();
assert(mazePtr);
long numPathToRoute = maze_read(mazePtr, global_inputFile);
router_t* routerPtr = router_alloc(global_params[PARAM_XCOST],
global_params[PARAM_YCOST],
global_params[PARAM_ZCOST],
global_params[PARAM_BENDCOST]);
assert(routerPtr);
list_t* pathVectorListPtr = list_alloc(NULL);
assert(pathVectorListPtr);
/*
* Run transactions
*/
router_solve_arg_t routerArg = {routerPtr, mazePtr, pathVectorListPtr};
TIMER_T startTime;
TIMER_READ(startTime);
GOTO_SIM();
/* OTM builds run the solver under OpenMP; otherwise the harness's own
thread pool (thread_start) drives router_solve on numThread threads. */
#ifdef OTM
#pragma omp parallel
{
router_solve((void *)&routerArg);
}
#else
thread_start(router_solve, (void*)&routerArg);
#endif
GOTO_REAL();
TIMER_T stopTime;
TIMER_READ(stopTime);
/* Tally routed paths across all per-thread path vectors. */
long numPathRouted = 0;
list_iter_t it;
list_iter_reset(&it, pathVectorListPtr);
while (list_iter_hasNext(&it, pathVectorListPtr)) {
vector_t* pathVectorPtr = (vector_t*)list_iter_next(&it, pathVectorListPtr);
numPathRouted += vector_getSize(pathVectorPtr);
}
printf("Paths routed = %li\n", numPathRouted);
printf("\nTime = %lf\n", TIMER_DIFF_SECONDS(startTime, stopTime));
/*
* Check solution and clean up
*/
assert(numPathRouted <= numPathToRoute);
bool_t status = maze_checkPaths(mazePtr, pathVectorListPtr, global_doPrint);
assert(status == TRUE);
puts("Verification passed.");
maze_free(mazePtr);
router_free(routerPtr);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
GOTO_SIM();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of labyrinth.c
*
* =============================================================================
*/
|
section.c | #include <omp.h>
#include <stdio.h>
#define N 1000
/* Sum elements [lo, hi) of a and b into c, printing the worker thread's id
   exactly as the original inline section bodies did. */
static void add_range (float *a, float *b, float *c, int lo, int hi)
{
    int tid = omp_get_thread_num();
    int k;

    printf("working in Thread %d\n", tid);
    for (k = lo; k < hi; k++) {
        printf("%x:", tid);
        c[k] = a[k] + b[k];
    }
}

int main (){
    int i;
    float a[N], b[N], c[N];

    /* Some initializations */
    for (i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0;

    /* Two independent halves of the vector sum, one per OpenMP section. */
    #pragma omp parallel shared(a,b,c)
    {
        #pragma omp sections nowait
        {
            #pragma omp section
            add_range(a, b, c, 0, N/2);

            #pragma omp section
            add_range(a, b, c, N/2, N);
        }
    }
    return 0;
}
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by the composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function : f(Sc,Dc) where Sc and Dc are the normalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color divided by Source alpha
Dca = Dc*Da normalized Dest color divided by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' appears in the formulas below as 'gamma', the resulting alpha value.
Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also define that Mathematical Composition
methods should use a 'Over' blending mode for Alpha Channel.
It however was not applied for composition modes of 'Plus', 'Minus',
the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (synchronize channel updates)
is turned off (enabled by default) then mathematical compositions are
only performed on the channels specified, and are applied
independently of each other. In other words the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    blue_part,
    chroma_value,
    green_part,
    hue_sector,
    lightness_shift,
    red_part,
    secondary;

  /*
    Convert HCL to RGB.  The hue is scaled to the [0,6) sector range of the
    standard hue-to-RGB construction; in each sector the two dominant
    channels receive the chroma c and the intermediate value x, and every
    channel is then shifted by the luma residual m.  A hue sector outside
    [0,6) leaves all channel parts at zero, as in the original chain of
    interval tests.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  hue_sector=6.0*hue;
  chroma_value=chroma;
  secondary=chroma_value*(1.0-fabs(fmod(hue_sector,2.0)-1.0));
  red_part=0.0;
  green_part=0.0;
  blue_part=0.0;
  if ((hue_sector >= 0.0) && (hue_sector < 6.0))
    switch ((int) hue_sector)
    {
      case 0:
      {
        red_part=chroma_value;
        green_part=secondary;
        break;
      }
      case 1:
      {
        red_part=secondary;
        green_part=chroma_value;
        break;
      }
      case 2:
      {
        green_part=chroma_value;
        blue_part=secondary;
        break;
      }
      case 3:
      {
        green_part=secondary;
        blue_part=chroma_value;
        break;
      }
      case 4:
      {
        red_part=secondary;
        blue_part=chroma_value;
        break;
      }
      default:
      {
        /* sector 5: hue_sector in [5,6) */
        red_part=chroma_value;
        blue_part=secondary;
        break;
      }
    }
  lightness_shift=luma-(0.298839*red_part+0.586811*green_part+0.114350*
    blue_part);
  *red=QuantumRange*(red_part+lightness_shift);
  *green=QuantumRange*(green_part+lightness_shift);
  *blue=QuantumRange*(blue_part+lightness_shift);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    blue_value,
    chroma_value,
    green_value,
    hue_value,
    peak,
    red_value;

  /*
    Convert RGB to HCL: hue from the sector of the dominant channel,
    chroma from the max-min spread, and luma from a weighted sum of the
    channels.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  red_value=red;
  green_value=green;
  blue_value=blue;
  peak=MagickMax(red_value,MagickMax(green_value,blue_value));
  chroma_value=peak-(MagickRealType) MagickMin(red_value,MagickMin(green_value,
    blue_value));
  hue_value=0.0;
  if (chroma_value != 0)
    {
      /*
        The hue sector depends on which channel holds the maximum; a zero
        chroma (achromatic pixel) leaves the hue at zero.
      */
      if (red == peak)
        hue_value=fmod((green_value-blue_value)/chroma_value+6.0,6.0);
      else
        if (green == peak)
          hue_value=((blue_value-red_value)/chroma_value)+2.0;
        else
          if (blue == peak)
            hue_value=((red_value-green_value)/chroma_value)+4.0;
    }
  *hue=(hue_value/6.0);
  *chroma=QuantumScale*chroma_value;
  *luma=QuantumScale*(0.298839*red_value+0.586811*green_value+0.114350*
    blue_value);
}
/*
  CompositeOverImage() blends source_image over the canvas image at
  (x_offset,y_offset) with the Porter-Duff 'over' operator, writing the
  result back into image.  Returns MagickTrue on success, MagickFalse if
  a cache-view access fails or the progress monitor aborts.  When
  clip_to_self is MagickTrue only the region actually covered by the
  overlay is modified; otherwise rows/columns outside the overlay are
  rewritten from virtual source pixels.
*/
static MagickBooleanType CompositeOverImage(Image *image,
const Image *source_image,const MagickBooleanType clip_to_self,
const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*image_view,
*source_view;
const char
*value;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Composite image.
*/
status=MagickTrue;
progress=0;
/* "compose:clamp" artifact selects ClampPixel() vs. ClampToQuantum(). */
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
/* NOTE(review): status and progress are initialized twice (here and
above); the repetition is redundant but harmless. */
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
PixelInfo
canvas_pixel,
source_pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
/* clip_to_self: skip whole rows the overlay does not cover. */
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
/* Negative x_offset: advance p so p lines up with canvas column 0. */
if (x_offset < 0)
p-=x_offset*(ssize_t) GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
Sa,
Sc,
Sca;
ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
/* Outside the overlay the canvas keeps its color but the alpha
channel becomes fully transparent. */
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
alpha=Sa+Da-Sa*Da;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((source_traits == UndefinedPixelTrait) &&
(channel != AlphaPixelChannel))
continue;
if (channel == AlphaPixelChannel)
{
/*
Set alpha channel.
*/
pixel=QuantumRange*alpha;
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Sc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
gamma=PerceptibleReciprocal(alpha);
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
/* Wrap p so the overlay repeats horizontally across the canvas row. */
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
if ((source_image->alpha_trait == UndefinedPixelTrait) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p;
Quantum
*q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
ssize_t
i;
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(source_image,i);
PixelTrait source_traits = GetPixelChannelTraits(source_image,
channel);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((source_traits == UndefinedPixelTrait) ||
(traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p;
Quantum
*q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
double
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling dictated by an overlay gradient map:
X = red_channel; Y = green_channel; compose:args =
x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
Users input sigma now needs to be converted to the EWA ellipse size.
The filter defaults to a sigma of 0.5 so to make this match the users
input the ellipse size needs to be doubled.
*/
width=2.0*geometry_info.rho;
height=width;
if ((flags & HeightValue) != 0)
height=2.0*geometry_info.sigma;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
/*
Rotate vectors if a rotation angle is given.
*/
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
/*
Lets set a angle range and calculate in the loop.
*/
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a gaussian cylindrical filter for EWA Bluring.
As the minimum ellipse radius of support*1.0 the EWA algorithm
can only produce a minimum blur of 0.5 for Gaussian (support=2.0)
This means that even 'No Blur' will be still a little blurry! The
solution (as well as the problem of preventing any user expert filter
settings, is to set our own user settings, restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/*
Perform the variable blurring of each pixel in image.
*/
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs(angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided,
displacement/distortion map. -- Like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
status=InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
if (status == MagickFalse)
break;
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
if (x < (ssize_t) source_image->columns)
break;
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from "compose:args", setting.
Unused values are set to zero automagically.
Arguments are normally a comma separated list, so this probably should
be changed to some 'general comma list' parser, (with a minimum
number of values)
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*(ssize_t) GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
DcaDa,
Sa,
SaSca,
Sc,
Sca;
ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
{
alpha=GetPixelIntensity(source_image,p)*Sa;
break;
}
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case FreezeCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case InterpolateCompositeOp:
case LightenCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case MultiplyCompositeOp:
case NegateCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ReflectCompositeOp:
case ScreenCompositeOp:
case SoftBurnCompositeOp:
case SoftDodgeCompositeOp:
case SoftLightCompositeOp:
case StampCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
case ModulusAddCompositeOp:
{
if ((Sa+Da) <= 1.0)
{
alpha=(Sa+Da);
break;
}
alpha=((Sa+Da)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sa-Da) >= 0.0)
{
alpha=(Sa-Da);
break;
}
alpha=((Sa-Da)+1.0);
break;
}
default:
{
alpha=1.0;
break;
}
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits = GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((channel == AlphaPixelChannel) &&
((traits & UpdatePixelTrait) != 0))
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case ChangeMaskCompositeOp:
{
MagickBooleanType
equivalent;
if (Da < 0.5)
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q);
if (equivalent != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) OpaqueAlpha;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*fabs((double) (Sa-Da));
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*(1.0-(1.0-Sa)*(1.0-Sa)*
PerceptibleReciprocal(Da));
if (pixel < 0.0)
pixel=0.0;
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sa)-0.25*
cos(MagickPI*Da));
break;
}
case LightenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*Sa*Da;
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*((1.0-Sa-Da));
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*(Sa*Sa*PerceptibleReciprocal(1.0-Da));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sa+Da*Da-1.0);
break;
}
case StereoCompositeOp:
{
pixel=QuantumRange*(Sa+Da)/2;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
if (source_traits == UndefinedPixelTrait)
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Dc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
SaSca=Sa*PerceptibleReciprocal(Sca);
DcaDa=Dca*PerceptibleReciprocal(Da);
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case BlurCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)*
SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) GetPixelBlack(source_image,p);
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*gamma*(1.0-(1.0-Sca)*(1.0-Sca)*
PerceptibleReciprocal(Dca));
if (pixel < 0.0)
pixel=0.0;
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sca)-0.25*
cos(MagickPI*Dca));
break;
}
case LinearBurnCompositeOp:
{
/*
LinearBurn: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
LinearLight: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
'Mathematics' a free form user control mathematical composition
is defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite
as a comma separated 'geometry' string in "compose:args" image
artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
f(Sc,Dc) = Sc - Dc
*/
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
if ((Sca+Dca) <= 1.0)
{
pixel=QuantumRange*(Sca+Dca);
break;
}
pixel=QuantumRange*((Sca+Dca)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sca-Dca) >= 0.0)
{
pixel=QuantumRange*(Sca-Dca);
break;
}
pixel=QuantumRange*((Sca-Dca)+1.0);
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*(1.0-fabs(1.0-Sca-Dca));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*Sca;
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
pixel=QuantumRange*(Sca+Dca);
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Sca*PerceptibleReciprocal(1.0-Dca));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case RMSECompositeOp:
{
double
gray;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
gray=sqrt(
(canvas_pixel.red-source_pixel.red)*
(canvas_pixel.red-source_pixel.red)+
(canvas_pixel.green-source_pixel.green)*
(canvas_pixel.green-source_pixel.green)+
(canvas_pixel.blue-source_pixel.blue)*
(canvas_pixel.blue-source_pixel.blue)/3.0);
switch (channel)
{
case RedPixelChannel: pixel=gray; break;
case GreenPixelChannel: pixel=gray; break;
case BluePixelChannel: pixel=gray; break;
default: pixel=Dc; break;
}
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftBurnCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Dca*PerceptibleReciprocal(1.0-Sca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Sca)*
PerceptibleReciprocal(Dca));
break;
}
case SoftDodgeCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Sca*PerceptibleReciprocal(1.0-Dca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Dca)*
PerceptibleReciprocal(Sca));
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa*
(4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)-
DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sca+Dca*Dca-1.0);
break;
}
case StereoCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)*
PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0*
(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image * ) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Work on a private clone so the caller's texture is left untouched; match
    its colorspace to the canvas and make out-of-bounds virtual pixel reads
    wrap around (tile).
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  /*
    General path: anything other than a plain opaque copy/over needs full
    composition semantics, so composite the texture tile by tile.
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        ssize_t
          x;

        if (status == MagickFalse)
          continue;  /* an earlier tile failed; skip the remaining rows */
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* Composite one tile at (x,y), honoring any tile offset. */
          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized): a straight channel
    copy per row, parallelized over rows.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *p,
      *pixels;

    ssize_t
      x;

    Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one texture row (tile-wrapped via the virtual pixel method) and
      queue the destination row for writing.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      ssize_t
        j;

      p=pixels;  /* restart at the left edge of the texture row per tile */
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;  /* clip the final partial tile */
      for (j=0; j < (ssize_t) width; j++)
      {
        ssize_t
          i;

        /* Copy every channel the destination image also defines. */
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
|
host_varfn_function.c | #include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <omp.h>
#include <hostrpc.h>
// This user variable function returns a uint so declare function
// as hostrpc_varfn_uint_t .
hostrpc_varfn_uint_t my3argfn;
hostrpc_varfn_double_t mydoublefn;
// This is an arbitrary 3 arg function
// Example user-supplied variadic host function, matching the
// hostrpc_varfn_uint_t signature.  The first fixed argument is an opaque
// function-pointer slot (unused here); the varargs are read as:
//   int *a  (logged only), int i2, int i3.
// Returns i2 + i3 so callers can verify the round trip.
uint my3argfn(void * fnptr, ...) {
va_list args;
va_start(args, fnptr);
int*a = va_arg(args, int*);
int i2 = va_arg(args, int);
int i3 = va_arg(args, int);
// %p requires a void* argument; passing an unconverted int* is undefined
// behavior, so cast explicitly.
printf(" INSIDE my3argfn: fnptr:%p &a:%p int arg2:%d int arg3:%d \n", fnptr,(void *)a,i2,i3);
va_end(args);
return i2+i3;
}
// This is an arbitrary 3 arg function
// Example user-supplied variadic host function, matching the
// hostrpc_varfn_double_t signature.  Varargs are read as:
//   int *a  (logged only), int i2, int i3.
// Returns (i2 + i3) * 1.1 so integer and double paths are distinguishable.
double mydoublefn(void * fnptr, ...) {
va_list args;
va_start(args, fnptr);
int*a = va_arg(args, int*);
int i2 = va_arg(args, int);
int i3 = va_arg(args, int);
double rc = (double) (i2+i3) * 1.1;
// Cast the int* to void* for %p (printf requires void* for %p).
printf(" INSIDE mydoublefn: fnptr:%p &a:%p int arg2:%d int arg3:%d rc:%f \n", fnptr,(void *)a,i2,i3,rc);
va_end(args);
return rc;
}
// Driver: validates my3argfn / mydoublefn three ways — called directly
// through function pointers, through the hostrpc_varfn_* host fallbacks,
// and via hostrpc from inside an OpenMP target region.  Returns
// EXIT_SUCCESS when the target-region loop copied b[] into a[] intact.
int main()
{
int N = 10;
int a[N];
int b[N];
int i;
for (i=0; i<N; i++){
a[i]=0;
b[i]=i;
}
hostrpc_varfn_uint_t * my_host_fn_ptr;
my_host_fn_ptr = &my3argfn;
hostrpc_varfn_double_t * my_host_fn_double;
my_host_fn_double = &mydoublefn;
// %p requires void* arguments: &a is int(*)[10], so cast explicitly.
printf("Testing my3argfn execution as function pointer %p &a:%p\n",(void *) my_host_fn_ptr, (void *) &a);
uint sim1 = my_host_fn_ptr(NULL, &a, 2, 3);
double sim1d = my_host_fn_double(NULL, &a, 2, 3);
// sim1 is unsigned (uint), so print it with %u, not %d.
printf("Return values are %u and %f \n",sim1,sim1d);
printf("\nTesting the host fallback of hostrpc_varfn_double:%p\n",(void *) my_host_fn_double);
uint sim2 = hostrpc_varfn_uint(my_host_fn_ptr, &a, 4, 5);
double sim2d = hostrpc_varfn_double(my_host_fn_double, &a, 4, 5);
printf("Return values are %u and %f \n",sim2,sim2d);
printf("\nTesting call to hostrpc_varfn_uint in target region:%p\n",(void *) my_host_fn_ptr);
#pragma omp target parallel for map(from: a[0:N]) map(to: b[0:N]) map(to: my_host_fn_ptr,my_host_fn_double)
for (int j = 0; j< N; j++) {
a[j]=b[j];
uint rc=hostrpc_varfn_uint(my_host_fn_ptr, &a, j, a[j]);
double rcd=hostrpc_varfn_double(my_host_fn_double, &a, j, a[j]);
// rc is unsigned: print with %u.
printf("DEVICE: fnptr:%p dfnptr:%p &a:%p j:%d a[j]:%d hostrpc_varfn_uint return vals are %u %f\n",
(void*) my_host_fn_ptr,
(void*) my_host_fn_double,
(void*) &a, j, a[j],rc,rcd);
}
// Verify the device loop produced a[] == b[].
int rc = 0;
for (i=0; i<N; i++)
if (a[i] != b[i] ) {
rc++;
printf ("Wrong value: a[%d]=%d\n", i, a[i]);
}
if (!rc){
printf("Success\n");
return EXIT_SUCCESS;
} else{
printf("Failure\n");
return EXIT_FAILURE;
}
}
|
LogSoftMax.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/LogSoftMax.c"
#else
// Forward pass of log-softmax: output = input - log(sum(exp(input)))
// along the "dim" axis.  The 1D/2D/3D/4D cases below are flattened into
// (nframe * stride) independent slices, each of length dim with element
// spacing "stride", so one parallel loop handles every layout.
void THNN_(LogSoftMax_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output)
{
real *input_data, *output_data;
ptrdiff_t nframe = 0, dim = 0, stride = 0;
ptrdiff_t t, d;
if (input->nDimension == 1)
{
// 1D: a single vector of length size[0].
nframe = 1;
dim = input->size[0];
stride = 1;
}
else if (input->nDimension == 2)
{
// 2D: softmax over dim 1 for each of size[0] rows.
nframe = input->size[0];
dim = input->size[1];
stride = 1;
}
else if (input->nDimension == 3)
{
// 3D: softmax over dim 0; each of size[1]*size[2] spatial positions
// is an interleaved slice.
nframe = 1;
dim = input->size[0];
stride = input->size[1]*input->size[2];
}
else if (input->nDimension == 4)
{
// 4D (NCHW): softmax over the channel dim for every (n, h, w).
nframe = input->size[0];
dim = input->size[1];
stride = input->size[2]*input->size[3];
}
else
THArgCheck(0, 2, "1D, 2D, 3D or 4D tensor expected");
// Work on a contiguous copy so the stride arithmetic below is valid.
input = THTensor_(newContiguous)(input);
THTensor_(resizeAs)(output, input);
real *input_data0 = THTensor_(data)(input);
real *output_data0 = THTensor_(data)(output);
accreal logsum;
real maxInput;
// One iteration per slice; all loop-carried scalars and the per-slice
// base pointers are thread-private.
#pragma omp parallel for private(t, d, maxInput, logsum, input_data, output_data)
for (t = 0; t < stride*nframe; t++)
{
logsum = 0;
maxInput = -THInf;
input_data = input_data0 + (t/stride)*dim*stride + t % stride;
output_data = output_data0 + (t/stride)*dim*stride + t % stride;
// Subtract the slice max before exponentiating for numerical stability
// (log-sum-exp trick).
for (d = 0; d < dim; d++)
maxInput = THMax(maxInput, input_data[d*stride]);
for (d = 0; d < dim; d++)
logsum += exp(input_data[d*stride] - maxInput);
logsum = maxInput + log(logsum);
for (d = 0; d < dim; d++)
output_data[d*stride] = input_data[d*stride] - logsum;
}
// Release the contiguous copy (the caller's tensor is untouched).
THTensor_(free)(input);
}
// Backward pass of log-softmax:
//   gradInput = gradOutput - exp(output) * sum(gradOutput)
// computed per slice, mirroring the slice decomposition of updateOutput.
// Shapes are taken from "output" (the saved forward result).
void THNN_(LogSoftMax_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output)
{
THNN_CHECK_SHAPE(input, gradOutput);
real *gradInput_data, *gradOutput_data, *output_data;
ptrdiff_t nframe = 0, dim = 0, stride = 0;
ptrdiff_t t, d;
if (output->nDimension == 1)
{
nframe = 1;
dim = output->size[0];
stride = 1;
}
else if (output->nDimension == 2)
{
nframe = output->size[0];
dim = output->size[1];
stride = 1;
}
else if (output->nDimension == 3)
{
nframe = 1;
dim = output->size[0];
stride = output->size[1]*output->size[2];
}
else if (output->nDimension == 4)
{
nframe = output->size[0];
dim = output->size[1];
stride = output->size[2]*output->size[3];
}
else
THError("1D, 2D, 3D or 4D tensor expected");
// Contiguous copies so the stride arithmetic below is valid.
output = THTensor_(newContiguous)(output);
gradOutput = THTensor_(newContiguous)(gradOutput);
THTensor_(resizeAs)(gradInput, output);
real *gradInput_data0 = THTensor_(data)(gradInput);
real *output_data0 = THTensor_(data)(output);
real *gradOutput_data0 = THTensor_(data)(gradOutput);
accreal sum;
// One iteration per slice; per-slice pointers and the accumulator are
// thread-private.
#pragma omp parallel for private(t, sum, d, gradInput_data, output_data, gradOutput_data)
for (t = 0; t < stride*nframe; t++)
{
sum = 0;
gradInput_data = gradInput_data0 + (t/stride)*dim*stride + t % stride;
output_data = output_data0 + (t/stride)*dim*stride + t % stride;
gradOutput_data = gradOutput_data0 + (t/stride)*dim*stride + t % stride;
// sum(gradOutput) over the slice, then apply the closed-form gradient.
for (d = 0; d < dim; d++)
sum += gradOutput_data[d*stride];
for (d = 0; d < dim; d++)
gradInput_data[d*stride] = gradOutput_data[d*stride] - exp(output_data[d*stride])*sum;
}
// Release the contiguous copies.
THTensor_(free)(gradOutput);
THTensor_(free)(output);
}
#endif
|
GB_subassign_13.c | //------------------------------------------------------------------------------
// GB_subassign_13: C(I,J)<!M> = scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 13: C(I,J)<!M> = scalar ; using S
// M: present
// Mask_comp: true
// C_replace: false
// accum: NULL
// A: scalar
// S: constructed
// C: not bitmap, but can be full since no zombies are inserted in that case
// M: not bitmap
#include "GB_subassign_methods.h"
// C(I,J)<!M> = scalar, using the extracted pattern S = C(I,J).
// All |I|*|J| positions are visited (the sparsity of !M cannot be
// exploited).  Phase 1 updates entries already present in C (located via
// S) and counts new entries; phase 2 inserts the new entries as pending
// tuples.  The mask M is complemented on the fly (Mask_comp is true for
// this method).  Most identifiers below (S, nzombies, taskid, ntasks,
// pS, pM, kfirst, klast, ...) are introduced by the GB_* macros.
GrB_Info GB_subassign_13
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct,
const void *scalar,
const GrB_Type atype,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_GET_C ;      // C must not be bitmap
const int64_t Cnvec = C->nvec ;
const int64_t *restrict Ch = C->h ;
const int64_t *restrict Cp = C->p ;
const bool C_is_hyper = (Ch != NULL) ;
GB_GET_MASK ;
GB_GET_SCALAR ;
GB_GET_S ;
// Method 13 has no accumulator.
GrB_BinaryOp accum = NULL ;
//--------------------------------------------------------------------------
// Method 13: C(I,J)<!M> = scalar ; using S
//--------------------------------------------------------------------------
// Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
// required.  The sparsity of !M cannot be exploited.
// Methods 13, 15, 17, and 19 are very similar.
//--------------------------------------------------------------------------
// Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
//--------------------------------------------------------------------------
GB_SUBASSIGN_IXJ_SLICE ;
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
//------------------------------------------------------------------
// get S(iA_start:end,j) and M(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;
//------------------------------------------------------------------
// C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
//--------------------------------------------------------------
// Get the indices at the top of each list.
//--------------------------------------------------------------
int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;
//--------------------------------------------------------------
// find the smallest index of [iS iA iM] (always iA)
//--------------------------------------------------------------
// The "A" operand is a scalar, dense over IxJ, so iA is always
// the smallest of the three indices.
int64_t i = iA ;
//--------------------------------------------------------------
// get M(i,j)
//--------------------------------------------------------------
bool mij ;
if (i == iM)
{
// mij = (bool) M [pM]
mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
GB_NEXT (M) ;
}
else
{
// mij not present, implicitly false
ASSERT (i < iM) ;
mij = false ;
}
// complement the mask entry mij since Mask_comp is true
mij = !mij ;
//--------------------------------------------------------------
// assign the entry
//--------------------------------------------------------------
if (i == iS)
{
ASSERT (i == iA) ;
{
// both S (i,j) and A (i,j) present
if (mij)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =A ): copy A, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_noaccum_C_A_1_scalar ;
}
GB_NEXT (S) ;
}
}
else
{
ASSERT (i == iA) ;
{
// S (i,j) is not present, A (i,j) is present
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
// Phase 1 only counts; the insertion happens in
// phase 2.
task_pending++ ;
}
}
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
//------------------------------------------------------------------
// get S(iA_start:end,j) and M(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;
//------------------------------------------------------------------
// C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
//--------------------------------------------------------------
// Get the indices at the top of each list.
//--------------------------------------------------------------
int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;
//--------------------------------------------------------------
// find the smallest index of [iS iA iM] (always iA)
//--------------------------------------------------------------
int64_t i = iA ;
//--------------------------------------------------------------
// get M(i,j)
//--------------------------------------------------------------
bool mij ;
if (i == iM)
{
// mij = (bool) M [pM]
mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
GB_NEXT (M) ;
}
else
{
// mij not present, implicitly false
ASSERT (i < iM) ;
mij = false ;
}
// complement the mask entry mij since Mask_comp is true
mij = !mij ;
//--------------------------------------------------------------
// assign the entry
//--------------------------------------------------------------
if (i == iS)
{
ASSERT (i == iA) ;
{
// S (i,j) is present: this entry was handled in
// phase 1; just advance the S pointer.
GB_NEXT (S) ;
}
}
else
{
ASSERT (i == iA) ;
{
// S (i,j) is not present, A (i,j) is present
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
}
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
GB_unaryop__abs_bool_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_uint32
// op(A') function: GB_tran__abs_bool_uint32
// C type: bool
// A type: uint32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise over the dense value array:
// for each p, Cx [p] = (bool) Ax [p], via the GB_CAST_OP macro defined
// above.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop__abs_bool_uint32
(
bool *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting each entry to bool.
// All numeric work is in the included transpose template, driven by the
// GB_* macros defined above.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB_tran__abs_bool_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__max_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint64)
// A*D function (colscale): GB (_AxD__max_uint64)
// D*A function (rowscale): GB (_DxB__max_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint64)
// C=scalar+B GB (_bind1st__max_uint64)
// C=scalar+B' GB (_bind1st_tran__max_uint64)
// C=A+scalar GB (_bind2nd__max_uint64)
// C=A'+scalar GB (_bind2nd_tran__max_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT64 || GxB_NO_MAX_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  The arithmetic is supplied
// by the GB_BINOP macro (uint64 max) via the included template.
void GB (_Cdense_ewise3_accum__max_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulator).
// Returns GrB_NO_VALUE when the max_uint64 kernel is compiled out.
GrB_Info GB (_Cdense_ewise3_noaccum__max_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, using
// the task slicing precomputed in B_ek_slicing.
GrB_Info GB (_Cdense_accumB__max_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (type uint64_t, passed as GB_void*)
// into the dense matrix C.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB (_Cdense_accumb__max_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// Single exit point; the original had a duplicate return inside the
// block above that made this one unreachable dead code.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// results into C->x; the pattern work is in the included template.
GrB_Info GB (_AxD__max_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the
// results into C->x; the pattern work is in the included template.
GrB_Info GB (_DxB__max_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the uint64 max operator.  The
// GB_WERK workspaces declared here are consumed by the add template and
// released by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__max_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the uint64 max
// operator, using the precomputed task list.
GrB_Info GB (_AemultB_01__max_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for max (commutative), so only the
// unflipped branch below is compiled.
GrB_Info GB (_AemultB_02__max_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef  GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef  GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef  GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full, iterating over the entries of M.
GrB_Info GB (_AemultB_03__max_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult, bitmap case: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// held as a bitmap matrix.
GrB_Info GB (_AemultB_bitmap__max_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = max (x, Bx [p]) for every entry present in the bitmap Bb,
// with the scalar x bound as the first operand.  Cx and Bx may alias.
GrB_Info GB (_bind1st__max_uint64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Bx = (uint64_t *) Bx_input ;
uint64_t xval = (*((uint64_t *) x_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < anz ; k++)
{
// skip entries absent from the bitmap
if (GBB (Bb, k))
{
uint64_t bval = Bx [k] ;
Cx [k] = GB_IMAX (xval, bval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = max (Ax [p], y) for every entry present in the bitmap Ab,
// with the scalar y bound as the second operand.  Cx and Ax may alias.
GrB_Info GB (_bind2nd__max_uint64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t yval = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < anz ; k++)
{
// skip entries absent from the bitmap
if (GBB (Ab, k))
{
uint64_t aval = Ax [k] ;
Cx [k] = GB_IMAX (aval, yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes
// Cx [pC] = max (x, Ax [pA]) with the scalar bound first.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
uint64_t aij = Ax [pA] ;  \
Cx [pC] = GB_IMAX (x, aij) ;        \
}
// C = op (x, A'): transpose A and apply the bound-first-scalar operator;
// the numeric work is in the included transpose template.
GrB_Info GB (_bind1st_tran__max_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef  GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this file
#undef  GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes
// Cx [pC] = max (Ax [pA], y) with the scalar bound second.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
uint64_t aij = Ax [pA] ;  \
Cx [pC] = GB_IMAX (aij, y) ;        \
}
// C = op (A', y): transpose A and apply the bound-second-scalar operator;
// the numeric work is in the included transpose template.
GrB_Info GB (_bind2nd_tran__max_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pair_bool.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_bool)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_bool)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: bool
// A type: bool
// A pattern? 1
// B type: bool
// B pattern? 1
// BinaryOp: cij = 1
// Kernel-configuration macros for the PAIR_bool operator: both inputs are
// pattern-only (their values are never read) and every result entry is 1.
// FIX: GB_A_IS_PATTERN and GB_B_IS_PATTERN previously ended in a stray
// trailing backslash; backslash-newline splicing (which happens before
// comment removal) silently merged the following comment line into the
// macro definition, and would have swallowed real code had the next line
// not been a comment. The dangling continuations are removed.
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: PAIR ignores both operands and returns 1
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_BOOL || GxB_NO_PAIR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense. The actual loop lives in the
// shared template below, specialized by the GB_* macros defined at the top
// of this file (for PAIR_bool every cij is simply 1).
void GB (_Cdense_ewise3_noaccum__pair_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads // number of threads the template may use
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// precomputed B_ek_slicing task decomposition. Returns GrB_NO_VALUE when
// this kernel has been compiled out via GB_DISABLE (GxB_NO_* controls).
GrB_Info GB (_Cdense_accumB__pair_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the work is done by the shared dense-subassign template
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of a dense matrix C.
// p_bwork points at the scalar, passed type-erased as GB_void.
GrB_Info GB (_Cdense_accumb__pair_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable duplicate return below — artifact of the code
// generator; harmless, kept byte-identical.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B, C<M> = A+B, or C<!M> = A+B, with C built
// with sparsity structure C_sparsity. The heavy lifting is done by the
// shared GB_add_template, driven by the TaskList decomposition.
GrB_Info GB (_AaddB__pair_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B into parallel tasks
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// scalars substituted for entries missing from A / B (eWiseUnion only);
// left uninitialized otherwise — presumably the template only reads them
// when is_eWiseUnion is true (TODO confirm in GB_add_template.c)
bool alpha_scalar ;
bool beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((bool *) alpha_scalar_in)) ;
beta_scalar = (*((bool *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_unop__cos_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cos_fp32_fp32)
// op(A') function: GB (_unop_tran__cos_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = cosf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cosf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = cosf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COS || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cosf (Ax [p]) for every entry p: apply the unary COS operator
// (float -> float, no typecast) across anz entries, parallelized with a
// static OpenMP schedule. When A is bitmap, Ab selects the live entries.
GrB_Info GB (_unop_apply__cos_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// full / sparse case: every slot 0..anz-1 holds a value
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = cosf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = cosf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cos (A'): transpose A and apply the COS operator in a single pass.
// The bucket-transpose loop lives in the shared GB_unop_transpose template,
// parameterized by the Workspaces / A_slice task decomposition.
GrB_Info GB (_unop_tran__cos_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint16_uint64
// op(A') function: GB_tran__abs_uint16_uint64
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint16_t) Ax [p] for all p: typecast each uint64_t entry down
// to uint16_t and apply ABS (the identity for unsigned values, per the
// "unaryop: cij = aij" spec above). Statically parallelized with OpenMP.
GrB_Info GB_unop__abs_uint16_uint64
(
uint16_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz, // number of entries to process
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs (cast (A')): transpose, typecast uint64 -> uint16, and apply ABS
// in one pass. The loop body comes from the shared transpose template;
// GB_PHASE_2_OF_2 selects its numerical (second) phase.
GrB_Info GB_tran__abs_uint16_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Compute *result = *x - *y and return 1 when the difference is
     * negative, 0 otherwise.
     *
     * NOTE: *y is normalized in place so that the field-wise subtraction
     * needs no borrow; callers must not rely on *y afterwards. */
    const long usec_per_sec = 1000000;

    /* Borrow seconds into y's microsecond field until x's microseconds
     * are at least as large. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / usec_per_sec + 1;
        y->tv_sec  += borrow;
        y->tv_usec -= borrow * usec_per_sec;
    }

    /* Conversely, fold whole seconds out of an oversized microsecond gap. */
    if (x->tv_usec - y->tv_usec > usec_per_sec) {
        int carry = (x->tv_usec - y->tv_usec) / usec_per_sec;
        y->tv_sec  -= carry;
        y->tv_usec += carry * usec_per_sec;
    }

    /* The subtraction is now borrow-free; tv_usec ends up non-negative
     * whenever the overall difference is non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, test;

  /* Problem dimensions: each axis carries a halo of 4 ghost cells on both
   * sides (hence the +8). Defaults are used when too few arguments are
   * given — previously Nx..Nt were read uninitialized in that case (UB). */
  int Nx = 16 + 8, Ny = 16 + 8, Nz = 16 + 8, Nt = 4;
  if (argc > 3) {
    Nx = atoi(argv[1]) + 8;
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[0]/A[1] are the two time buffers of the double-buffered update;
   * roc2 is the per-point coefficient field. (The original code malloc'd a
   * one-element block for roc2 and immediately leaked it by overwriting
   * the pointer; that allocation is gone.) */
  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  double ***roc2 = (double ***) malloc(sizeof(double **) * Nz);
  A[0] = (double ***) malloc(sizeof(double **) * Nz);
  A[1] = (double ***) malloc(sizeof(double **) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double **) malloc(sizeof(double *) * Ny);
    A[1][i] = (double **) malloc(sizeof(double *) * Ny);
    roc2[i] = (double **) malloc(sizeof(double *) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
      roc2[i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 64;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /* Initialize the full grid including the halo: the stencil below reads
   * indices down to 0 (i-4 at i==4) and reads the (t+1)%2 buffer on the
   * RHS, so the original loops starting at 1 — and the never-initialized
   * A[1] — caused uninitialized reads. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0; /* previous time level starts at zero */
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point stencil coefficients: center + 4 shells along each axis. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz - 4; i++) {
        for (j = 4; j < Ny - 4; j++) {
          for (k = 4; k < Nx - 4; k++) {
            A[(t + 1) % 2][i][j][k] =
                2.0 * A[t % 2][i][j][k] - A[(t + 1) % 2][i][j][k] +
                roc2[i][j][k] *
                    (coef0 * A[t % 2][i][j][k] +
                     coef1 * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] +
                              A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k] +
                              A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]) +
                     coef2 * (A[t % 2][i - 2][j][k] + A[t % 2][i + 2][j][k] +
                              A[t % 2][i][j - 2][k] + A[t % 2][i][j + 2][k] +
                              A[t % 2][i][j][k - 2] + A[t % 2][i][j][k + 2]) +
                     coef3 * (A[t % 2][i - 3][j][k] + A[t % 2][i + 3][j][k] +
                              A[t % 2][i][j - 3][k] + A[t % 2][i][j + 3][k] +
                              A[t % 2][i][j][k - 3] + A[t % 2][i][j][k + 3]) +
                     coef4 * (A[t % 2][i - 4][j][k] + A[t % 2][i + 4][j][k] +
                              A[t % 2][i][j - 4][k] + A[t % 2][i][j + 4][k] +
                              A[t % 2][i][j][k - 4] + A[t % 2][i][j][k + 4]));
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (now including the tile-size list and the
   * two-pointer spine A itself, which the original leaked). */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);
  return 0;
}
|
expected_output.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
//---------------------------------------------------------------------
// program CG
//---------------------------------------------------------------------
//----------
// Class S:
//----------
//----------
// Class W:
//----------
//----------
// Class A:
//----------
//----------
// Class B:
//----------
//----------
// Class C:
//----------
struct anon_NAS_CG_c_75 {
double real;
double imag;
};
typedef struct anon_NAS_CG_c_75 dcomplex;
//---------------------------------------------------------------------
/*common / main_int_mem /*/
int colidx[567000];
int rowstr[7001];
int iv[7000];
int arow[7000];
int acol[63000];
/*common / main_flt_mem /*/
double aelt[63000];
double a[567000];
double x[7002];
double z[7002];
double p[7002];
double q[7002];
double r[7002];
/*common / partit_size /*/
int naa;
int nzz;
int firstrow;
int lastrow;
int firstcol;
int lastcol;
/*common /urando/*/
double amult;
double tran;
//---------------------------------------------------------------------
void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm);
void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][9], double aelt[][9], int iv[]);
void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][9], double aelt[][9], int firstrow, int lastrow, int nzloc[], double rcond, double shift);
void sprnvc(int n, int nz, int nn1, double v[], int iv[]);
int icnvrt(double x, int ipwr2);
void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified);
double randlc(double *x, double a);
void vranlc(int n, double *x, double a, double y[]);
double start[64];
double elapsed[64];
double elapsed_time();
void timer_clear(int n);
void timer_start(int n);
void timer_stop(int n);
double timer_read(int n);
void wtime(double *t);
//---------------------------------------------------------------------
// NPB CG benchmark driver (Clava-generated, class W constants inlined):
// builds a sparse matrix via makea, runs one untimed warm-up pass and then
// 15 timed inverse-power-method iterations with a conjugate-gradient inner
// solver, and verifies the computed zeta against the class reference value.
int main(int argc, char *argv[]) {
    int i, j, k, it;
    double zeta;
    double rnorm;
    double norm_temp1, norm_temp2;
    double t, mflops; // NOTE(review): unused locals tmax and t_names removed
    char Class;
    int verified;
    double zeta_verify_value, epsilon, err;
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(i = 0; i < 3; i++) {
        timer_clear(i);
    }
    timer_start(0);
    firstrow = 0;
    lastrow = 7000 - 1;
    firstcol = 0;
    lastcol = 7000 - 1;
    // Class selection: the tool inlined the build constants, so only the
    // 'W' branch (7000/8/15/12.0) can match here.
    if(7000 == 1400 && 8 == 7 && 15 == 15 && 12.0 == 10) {
        Class = 'S';
        zeta_verify_value = 8.5971775078648;
    }
    else if(7000 == 7000 && 8 == 8 && 15 == 15 && 12.0 == 12) {
        Class = 'W';
        zeta_verify_value = 10.362595087124;
    }
    else if(7000 == 14000 && 8 == 11 && 15 == 15 && 12.0 == 20) {
        Class = 'A';
        zeta_verify_value = 17.130235054029;
    }
    else if(7000 == 75000 && 8 == 13 && 15 == 75 && 12.0 == 60) {
        Class = 'B';
        zeta_verify_value = 22.712745482631;
    }
    else if(7000 == 150000 && 8 == 15 && 15 == 75 && 12.0 == 110) {
        Class = 'C';
        zeta_verify_value = 28.973605592845;
    }
    else if(7000 == 1500000 && 8 == 21 && 15 == 100 && 12.0 == 500) {
        Class = 'D';
        zeta_verify_value = 52.514532105794;
    }
    else if(7000 == 9000000 && 8 == 26 && 15 == 100 && 12.0 == 1500) {
        Class = 'E';
        zeta_verify_value = 77.522164599383;
    }
    else {
        Class = 'U';
    }
    printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - CG Benchmark\n\n");
    printf(" Size: %11d\n", 7000);
    printf(" Iterations: %5d\n", 15);
    printf("\n");
    naa = 7000;
    nzz = (7000 * (8 + 1) * (8 + 1));
    //---------------------------------------------------------------------
    // Initialize random number generator
    //---------------------------------------------------------------------
    tran = 314159265.0;
    amult = 1220703125.0;
    zeta = randlc(&tran, amult); // advances tran; this zeta value is unused
    //---------------------------------------------------------------------
    //
    //---------------------------------------------------------------------
    makea(naa, nzz, a, colidx, rowstr, firstrow, lastrow, firstcol, lastcol, arow, (int (*)[9]) (void *) acol, (double (*)[9]) (void *) aelt, iv);
    //---------------------------------------------------------------------
    // Note: as a result of the above call to makea:
    // values of j used in indexing rowstr go from 0 --> lastrow-firstrow
    // values of colidx which are col indexes go from firstcol --> lastcol
    // So:
    // Shift the col index vals from actual (firstcol --> lastcol )
    // to local, i.e., (0 --> lastcol-firstcol)
    //---------------------------------------------------------------------
    #pragma omp parallel for default(shared) private(j, k) firstprivate(lastrow, firstrow, firstcol, rowstr) reduction(- : colidx[:567000])
    for(j = 0; j < lastrow - firstrow + 1; j++) {
        // #pragma omp parallel for default(shared) private(k) firstprivate(j, firstcol, rowstr)
        for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
            colidx[k] = colidx[k] - firstcol;
        }
    }
    //---------------------------------------------------------------------
    // set starting vector to (1, 1, .... 1)
    //---------------------------------------------------------------------
    #pragma omp parallel for default(shared) private(i)
    for(i = 0; i < 7000 + 1; i++) {
        x[i] = 1.0;
    }
    #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol)
    for(j = 0; j < lastcol - firstcol + 1; j++) {
        q[j] = 0.0;
        z[j] = 0.0;
        r[j] = 0.0;
        p[j] = 0.0;
    }
    zeta = 0.0;
    //---------------------------------------------------------------------
    //---->
    // Do one iteration untimed to init all code and data page tables
    //----> (then reinit, start timing, to niter its)
    //---------------------------------------------------------------------
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(it = 1; it <= 1; it++) { // end of do one iteration untimed
        //---------------------------------------------------------------------
        // The call to the conjugate gradient routine:
        //---------------------------------------------------------------------
        conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
        //---------------------------------------------------------------------
        // zeta = shift + 1/(x.z)
        // So, first: (x.z)
        // Also, find norm of z
        // So, first: (z.z)
        //---------------------------------------------------------------------
        norm_temp1 = 0.0;
        norm_temp2 = 0.0;
        #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, x, z) reduction(+ : norm_temp1) reduction(+ : norm_temp2)
        for(j = 0; j < lastcol - firstcol + 1; j++) {
            norm_temp1 = norm_temp1 + x[j] * z[j];
            norm_temp2 = norm_temp2 + z[j] * z[j];
        }
        norm_temp2 = 1.0 / sqrt(norm_temp2);
        //---------------------------------------------------------------------
        // Normalize z to obtain x
        //---------------------------------------------------------------------
        #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, norm_temp2, z)
        for(j = 0; j < lastcol - firstcol + 1; j++) {
            x[j] = norm_temp2 * z[j];
        }
    }
    //---------------------------------------------------------------------
    // set starting vector to (1, 1, .... 1)
    //---------------------------------------------------------------------
    #pragma omp parallel for default(shared) private(i)
    for(i = 0; i < 7000 + 1; i++) {
        x[i] = 1.0;
    }
    zeta = 0.0;
    timer_stop(0);
    printf(" Initialization time = %15.3f seconds\n", timer_read(0));
    timer_start(1);
    //---------------------------------------------------------------------
    //---->
    // Main Iteration for inverse power method
    //---->
    //---------------------------------------------------------------------
    /*************** Clava msgError **************
    Loop Iteration number is too low
    ****************************************/
    for(it = 1; it <= 15; it++) { // end of main iter inv pow meth
        //---------------------------------------------------------------------
        // The call to the conjugate gradient routine:
        //---------------------------------------------------------------------
        conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
        //---------------------------------------------------------------------
        // zeta = shift + 1/(x.z)
        // So, first: (x.z)
        // Also, find norm of z
        // So, first: (z.z)
        //---------------------------------------------------------------------
        norm_temp1 = 0.0;
        norm_temp2 = 0.0;
        #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, x, z) reduction(+ : norm_temp1) reduction(+ : norm_temp2)
        for(j = 0; j < lastcol - firstcol + 1; j++) {
            norm_temp1 = norm_temp1 + x[j] * z[j];
            norm_temp2 = norm_temp2 + z[j] * z[j];
        }
        norm_temp2 = 1.0 / sqrt(norm_temp2);
        zeta = 12.0 + 1.0 / norm_temp1;
        if(it == 1) printf("\n iteration ||r|| zeta\n");
        printf(" %5d %20.14E%20.13f\n", it, rnorm, zeta);
        //---------------------------------------------------------------------
        // Normalize z to obtain x
        //---------------------------------------------------------------------
        #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, norm_temp2, z)
        for(j = 0; j < lastcol - firstcol + 1; j++) {
            x[j] = norm_temp2 * z[j];
        }
    }
    timer_stop(1);
    //---------------------------------------------------------------------
    // End of timed section
    //---------------------------------------------------------------------
    t = timer_read(1);
    printf(" Benchmark completed\n");
    epsilon = 1.0e-10;
    if(Class != 'U') {
        err = fabs(zeta - zeta_verify_value) / zeta_verify_value;
        if(err <= epsilon) {
            verified = 1;
            printf(" VERIFICATION SUCCESSFUL\n");
            printf(" Zeta is %20.13E\n", zeta);
            printf(" Error is %20.13E\n", err);
        }
        else {
            verified = 0;
            printf(" VERIFICATION FAILED\n");
            printf(" Zeta %20.13E\n", zeta);
            printf(" The correct zeta is %20.13E\n", zeta_verify_value);
        }
    }
    else {
        verified = 0;
        printf(" Problem size unknown\n");
        printf(" NO VERIFICATION PERFORMED\n");
    }
    if(t != 0.0) {
        mflops = (double) (2 * 15 * 7000) * (3.0 + (double) (8 * (8 + 1)) + 25.0 * (5.0 + (double) (8 * (8 + 1))) + 3.0) / t / 1000000.0;
    }
    else {
        mflops = 0.0;
    }
    print_results("CG", Class, 7000, 0, 0, 15, t, mflops, " floating point", verified);
    int exitValue = verified ? 0 : 1;
    return exitValue;
}
//---------------------------------------------------------------------
// Floaging point arrays here are named as in NPB1 spec discussion of
// CG algorithm
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// conj_grad: run cgitmax (25) iterations of the conjugate gradient
// method for the system A.z = x, then compute the explicit residual
// norm ||x - A.z|| and return it through *rnorm.
//
// colidx, rowstr, a : sparse matrix A in CSR form (rowstr = row
//                     pointers, colidx = column indices, a = values)
// x                 : right-hand side vector (read only)
// z                 : solution estimate (overwritten)
// p, q, r           : CG workspace vectors (overwritten)
// rnorm             : out, final residual norm
//
// Loop bounds use naa, firstrow/lastrow and firstcol/lastcol, which are
// presumably file-scope globals set during problem setup (defined
// outside this excerpt) — verify against the setup code.
//---------------------------------------------------------------------
void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm) {
int j, k;
int cgit, cgitmax = 25;
double d, sum, rho, rho0, alpha, beta;
rho = 0.0;
//---------------------------------------------------------------------
// Initialize the CG algorithm:
// q = 0, z = 0, r = p = x
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(naa, x)
for(j = 0; j < naa + 1; j++) {
q[j] = 0.0;
z[j] = 0.0;
r[j] = x[j];
p[j] = r[j];
}
//---------------------------------------------------------------------
// rho = r.r
// Now, obtain the norm of r: First, sum squares of r elements locally...
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, r) reduction(+ : rho)
for(j = 0; j < lastcol - firstcol + 1; j++) {
rho = rho + r[j] * r[j];
}
//---------------------------------------------------------------------
//---->
// The conj grad iteration loop
//---->
//---------------------------------------------------------------------
/*************** Clava msgError **************
Variable rho could not be categorized into any OpenMP Variable Scopeuse : RWR
****************************************/
for(cgit = 1; cgit <= cgitmax; cgit++) { // end of do cgit=1,cgitmax
//---------------------------------------------------------------------
// q = A.p
// The partition submatrix-vector multiply: use workspace w
//---------------------------------------------------------------------
//
// NOTE: this version of the multiply is actually (slightly: maybe %5)
// faster on the sp2 on 16 nodes than is the unrolled-by-2 version
// below. On the Cray t3d, the reverse is 1, i.e., the
// unrolled-by-two version is some 10% faster.
// The unrolled-by-8 version below is significantly faster
// on the Cray t3d - overall speed of code is 1.5 times faster.
#pragma omp parallel for default(shared) private(j, k, sum) firstprivate(lastrow, firstrow, rowstr, a, colidx, p)
for(j = 0; j < lastrow - firstrow + 1; j++) {
sum = 0.0;
// #pragma omp parallel for default(shared) private(k) firstprivate(j, rowstr, a, colidx, p) reduction(+ : sum)
for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
sum = sum + a[k] * p[colidx[k]];
}
q[j] = sum;
}
/*
for (j = 0; j < lastrow - firstrow + 1; j++) {
int i = rowstr[j];
int iresidue = (rowstr[j+1] - i) % 2;
double sum1 = 0.0;
double sum2 = 0.0;
if (iresidue == 1)
sum1 = sum1 + a[i]*p[colidx[i]];
for (k = i + iresidue; k <= rowstr[j+1] - 2; k += 2) {
sum1 = sum1 + a[k] *p[colidx[k]];
sum2 = sum2 + a[k+1]*p[colidx[k+1]];
}
q[j] = sum1 + sum2;
}
*/
/*
for (j = 0; j < lastrow - firstrow + 1; j++) {
int i = rowstr[j];
int iresidue = (rowstr[j+1] - i) % 8;
double sum = 0.0;
for (k = i; k <= i + iresidue - 1; k++) {
sum = sum + a[k]*p[colidx[k]];
}
for (k = i + iresidue; k <= rowstr[j+1] - 8; k += 8) {
sum = sum + a[k ]*p[colidx[k ]]
+ a[k+1]*p[colidx[k+1]]
+ a[k+2]*p[colidx[k+2]]
+ a[k+3]*p[colidx[k+3]]
+ a[k+4]*p[colidx[k+4]]
+ a[k+5]*p[colidx[k+5]]
+ a[k+6]*p[colidx[k+6]]
+ a[k+7]*p[colidx[k+7]];
}
q[j] = sum;
}
*/
//---------------------------------------------------------------------
// Obtain p.q
//---------------------------------------------------------------------
d = 0.0;
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, p, q) reduction(+ : d)
for(j = 0; j < lastcol - firstcol + 1; j++) {
d = d + p[j] * q[j];
}
//---------------------------------------------------------------------
// Obtain alpha = rho / (p.q)
//---------------------------------------------------------------------
alpha = rho / d;
//---------------------------------------------------------------------
// Save a temporary of rho
//---------------------------------------------------------------------
rho0 = rho;
//---------------------------------------------------------------------
// Obtain z = z + alpha*p
// and r = r - alpha*q
//---------------------------------------------------------------------
rho = 0.0;
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, alpha, p, q)
for(j = 0; j < lastcol - firstcol + 1; j++) {
z[j] = z[j] + alpha * p[j];
r[j] = r[j] - alpha * q[j];
}
//---------------------------------------------------------------------
// rho = r.r
// Now, obtain the norm of r: First, sum squares of r elements locally...
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, r) reduction(+ : rho)
for(j = 0; j < lastcol - firstcol + 1; j++) {
rho = rho + r[j] * r[j];
}
//---------------------------------------------------------------------
// Obtain beta:
//---------------------------------------------------------------------
beta = rho / rho0;
//---------------------------------------------------------------------
// p = r + beta*p
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, beta, r)
for(j = 0; j < lastcol - firstcol + 1; j++) {
p[j] = r[j] + beta * p[j];
}
}
//---------------------------------------------------------------------
// Compute residual norm explicitly: ||r|| = ||x - A.z||
// First, form A.z
// The partition submatrix-vector multiply
//---------------------------------------------------------------------
sum = 0.0;
#pragma omp parallel for default(shared) private(j, k, d) firstprivate(lastrow, firstrow, rowstr, a, colidx, z)
for(j = 0; j < lastrow - firstrow + 1; j++) {
d = 0.0;
// #pragma omp parallel for default(shared) private(k) firstprivate(j, rowstr, a, colidx, z) reduction(+ : d)
for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
d = d + a[k] * z[colidx[k]];
}
r[j] = d;
}
//---------------------------------------------------------------------
// At this point, r contains A.z
// Accumulate sum of squares of (x - A.z), then take the square root.
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j, d) firstprivate(lastcol, firstcol, x, r) reduction(+ : sum)
for(j = 0; j < lastcol - firstcol + 1; j++) {
d = x[j] - r[j];
sum = sum + d * d;
}
*rnorm = sqrt(sum);
}
//---------------------------------------------------------------------
// generate the test problem for benchmark 6
// makea generates a sparse matrix with a
// prescribed sparsity distribution
//
// parameter type usage
//
// input
//
// n i number of cols/rows of matrix
// nz i nonzeros as declared array size
// rcond r*8 condition number
// shift r*8 main diagonal shift
//
// output
//
// a r*8 array for nonzeros
// colidx i col indices
// rowstr i row pointers
//
// workspace
//
// iv, arow, acol i
// aelt r*8
//---------------------------------------------------------------------
// makea: generate the sparse CG test matrix with a prescribed sparsity
// distribution.  For each of the n rows it draws a random sparse vector
// (sprnvc), forces the diagonal entry (vecset), and records the row's
// nonzero pattern in arow/acol/aelt; sparse() then assembles the final
// CSR matrix (a, colidx, rowstr) from those per-row lists, with
// rcond = 1.0e-1 and shift = 12.0 hard-wired as in the original.
void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][9], double aelt[][9], int iv[]) {
    int row, elt, count;
    int col_buf[9];     // 1-based column indices of the current row
    double val_buf[9];  // matching nonzero values
    //---------------------------------------------------------------------
    // nonzer is approximately (int(sqrt(nnza /n)));
    //---------------------------------------------------------------------
    // pow2: the smallest power of two that is not less than n
    // (starts at 2, matching the original do-while which always doubled once)
    int pow2 = 2;
    while (pow2 < n) {
        pow2 *= 2;
    }
    //---------------------------------------------------------------------
    // Generate nonzero positions and save for the use in sparse.
    //---------------------------------------------------------------------
    for (row = 0; row < n; row++) {
        count = 8;
        sprnvc(n, count, pow2, val_buf, col_buf);
        vecset(n, val_buf, col_buf, &count, row + 1, 0.5);
        arow[row] = count;
        #pragma omp parallel for default(shared) private(elt) firstprivate(count, row, col_buf, val_buf)
        for (elt = 0; elt < count; elt++) {
            acol[row][elt] = col_buf[elt] - 1;  // convert to 0-based
            aelt[row][elt] = val_buf[elt];
        }
    }
    //---------------------------------------------------------------------
    // ... make the sparse matrix from list of elements with duplicates
    // (iv is used as workspace)
    //---------------------------------------------------------------------
    sparse(a, colidx, rowstr, n, nz, 8, arow, acol, aelt, firstrow, lastrow, iv, 1.0e-1, 12.0);
}
//---------------------------------------------------------------------
// rows range from firstrow to lastrow
// the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
//---------------------------------------------------------------------
// sparse: assemble the CSR matrix (a, colidx, rowstr) from the per-row
// element lists (arow = nonzeros per row, acol = column indices,
// aelt = values) produced by makea.  Duplicate (row, col) entries are
// summed; rcond is added to diagonal entries (minus shift) to bound the
// smallest eigenvalue from below.  nzloc is workspace used to count and
// later squeeze out duplicate slots.  Note: the nozer parameter is not
// referenced in this body.
void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][9], double aelt[][9], int firstrow, int lastrow, int nzloc[], double rcond, double shift) {
int nrows;
//---------------------------------------------------
// generate a sparse matrix from a list of
// [col, row, element] tri
//---------------------------------------------------
int i, j, j1, j2, nza, k, kk, nzrow, jcol;
double size, scale, ratio, va;
int cont40;
//---------------------------------------------------------------------
// how many rows of result
//---------------------------------------------------------------------
nrows = lastrow - firstrow + 1;
//---------------------------------------------------------------------
// ...count the number of triples in each row
// (rowstr[j] temporarily over-counts: arow[i] slots are reserved per
// element so duplicates have room before compression)
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(nrows)
for(j = 0; j < nrows + 1; j++) {
rowstr[j] = 0;
}
/*************** Clava msgError **************
unsolved dependency for arrayAccess rowstr use : RW
****************************************/
for(i = 0; i < n; i++) {
/*************** Clava msgError **************
unsolved dependency for arrayAccess rowstr use : RW
****************************************/
for(nza = 0; nza < arow[i]; nza++) {
j = acol[i][nza] + 1;
rowstr[j] = rowstr[j] + arow[i];
}
}
rowstr[0] = 0;
// prefix-sum: turn per-row counts into row start offsets
/*************** Clava msgError **************
unsolved dependency for arrayAccess rowstr use : RW
****************************************/
for(j = 1; j < nrows + 1; j++) {
rowstr[j] = rowstr[j] + rowstr[j - 1];
}
nza = rowstr[nrows] - 1;
//---------------------------------------------------------------------
// ... rowstr(j) now is the location of the first nonzero
// of row j of a
//---------------------------------------------------------------------
if(nza > nz) {
printf("Space for matrix elements exceeded in sparse\n");
printf("nza, nzmax = %d, %d\n", nza, nz);
exit(1);
}
//---------------------------------------------------------------------
// ... preload data pages
// (a = 0, colidx = -1 marks every slot as empty; nzloc counts the
// duplicates found per row)
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess a use : W
unsolved dependency for arrayAccess colidx use : W
****************************************/
for(j = 0; j < nrows; j++) {
#pragma omp parallel for default(shared) private(k) firstprivate(j, rowstr)
for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
a[k] = 0.0;
colidx[k] = -1;
}
nzloc[j] = 0;
}
//---------------------------------------------------------------------
// ... generate actual values by summing duplicates
// Each row i contributes the outer product of its sparse vector with
// itself, scaled by size (a geometric ramp controlled by rcond).
//---------------------------------------------------------------------
size = 1.0;
ratio = pow(rcond, (1.0 / (double) (n)));
/*************** Clava msgError **************
Loop contains Invalid Statement -> exit#874
****************************************/
for(i = 0; i < n; i++) {
/*************** Clava msgError **************
Loop contains Invalid Statement -> exit#874
****************************************/
for(nza = 0; nza < arow[i]; nza++) {
j = acol[i][nza];
scale = size * aelt[i][nza];
/*************** Clava msgError **************
Loop contains Invalid Statement -> exit#874
****************************************/
for(nzrow = 0; nzrow < arow[i]; nzrow++) {
jcol = acol[i][nzrow];
va = aelt[i][nzrow] * scale;
//--------------------------------------------------------------------
// ... add the identity * rcond to the generated matrix to bound
// the smallest eigenvalue from below by rcond
//--------------------------------------------------------------------
if(jcol == j && j == i) {
va = va + rcond - shift;
}
// cont40 flags that a slot for (j, jcol) was found (it replaces a
// Fortran "goto 40" pattern); insertion keeps row j sorted by column
cont40 = 0;
/*************** Clava msgError **************
Loop contains Invalid Statement -> BreakStmt#852
****************************************/
for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
if(colidx[k] > jcol) {
//----------------------------------------------------------------
// ... insert colidx here orderly
// (shift existing entries one slot right to make room at k)
//----------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess colidx use : RW
unsolved dependency for arrayAccess a use : RW
****************************************/
for(kk = rowstr[j + 1] - 2; kk >= k; kk--) {
if(colidx[kk] > -1) {
a[kk + 1] = a[kk];
colidx[kk + 1] = colidx[kk];
}
}
colidx[k] = jcol;
a[k] = 0.0;
cont40 = 1;
break;
}
else if(colidx[k] == -1) {
// first empty slot: claim it for column jcol
colidx[k] = jcol;
cont40 = 1;
break;
}
else if(colidx[k] == jcol) {
//--------------------------------------------------------------
// ... mark the duplicated entry
//--------------------------------------------------------------
nzloc[j] = nzloc[j] + 1;
cont40 = 1;
break;
}
}
if(cont40 == 0) {
printf("internal error in sparse: i=%d\n", i);
exit(1);
}
// accumulate the value into the slot found at index k
a[k] = a[k] + va;
}
}
size = size * ratio;
}
//---------------------------------------------------------------------
// ... remove empty entries and generate final results
// First make nzloc a running total of duplicates up to each row.
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess nzloc use : RW
****************************************/
for(j = 1; j < nrows; j++) {
nzloc[j] = nzloc[j] + nzloc[j - 1];
}
// compact a/colidx left over the duplicate slots, row by row
/*************** Clava msgError **************
unsolved dependency for arrayAccess a use : RW
unsolved dependency for arrayAccess colidx use : RW
****************************************/
for(j = 0; j < nrows; j++) {
if(j > 0) {
j1 = rowstr[j] - nzloc[j - 1];
}
else {
j1 = 0;
}
j2 = rowstr[j + 1] - nzloc[j];
nza = rowstr[j];
/*************** Clava msgError **************
Variable nza could not be categorized into any OpenMP Variable Scopeuse : RW
****************************************/
for(k = j1; k < j2; k++) {
a[k] = a[nza];
colidx[k] = colidx[nza];
nza = nza + 1;
}
}
// shift the row pointers down by the duplicates removed before each row
#pragma omp parallel for default(shared) private(j) firstprivate(nrows, nzloc)
for(j = 1; j < nrows + 1; j++) {
rowstr[j] = rowstr[j] - nzloc[j - 1];
}
nza = rowstr[nrows] - 1;
}
//---------------------------------------------------------------------
// generate a sparse n-vector (v, iv)
// having nzv nonzeros
//
// mark(i) is set to 1 if position i is nonzero.
// mark is all zero on entry and is reset to all zero before exit
// this corrects a performance bug found by John G. Lewis, caused by
// reinitialization of mark on every one of the n calls to sprnvc
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// sprnvc: generate a sparse n-vector (v, iv) with nz nonzeros at
// distinct random positions in [1, n].  Values and positions come from
// the file-scope LCG state (tran, amult) via randlc, so successive
// calls continue one pseudorandom sequence.  nn1 is the smallest power
// of two >= n, used to map a random draw onto an index.
//---------------------------------------------------------------------
void sprnvc(int n, int nz, int nn1, double v[], int iv[]) {
    int count = 0;
    while (count < nz) {
        double value = randlc(&tran, amult);
        //---------------------------------------------------------------------
        // generate an integer between 1 and n in a portable manner
        //---------------------------------------------------------------------
        int idx = icnvrt(randlc(&tran, amult), nn1) + 1;
        if (idx > n) {
            continue;  // draw fell outside [1, n]; retry
        }
        // reject the index if it was generated already
        int duplicate = 0;
        for (int t = 0; t < count; t++) {
            if (iv[t] == idx) {
                duplicate = 1;
                break;
            }
        }
        if (duplicate) {
            continue;
        }
        v[count] = value;
        iv[count] = idx;
        count++;
    }
}
//---------------------------------------------------------------------
// scale a double precision number x in (0,1) by a power of 2 and chop it
//---------------------------------------------------------------------
// Scale a double precision number x in (0,1) by ipwr2 (a power of 2)
// and chop it to an integer by truncation toward zero.
int icnvrt(double x, int ipwr2) {
    double scaled = ipwr2 * x;
    return (int) scaled;
}
//---------------------------------------------------------------------
// set ith element of sparse vector (v, iv) with
// nzv nonzeros to val
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// vecset: set the ith element of the sparse vector (v, iv) with *nzv
// nonzeros to val.  Every stored entry whose index equals i is updated;
// if i is not present, (val, i) is appended and *nzv is incremented.
//---------------------------------------------------------------------
void vecset(int n, double v[], int iv[], int *nzv, int i, double val) {
    int found = 0;
    for (int k = 0; k < *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            found = 1;
        }
    }
    if (!found) {
        // index i absent: append it as a new nonzero
        v[*nzv] = val;
        iv[*nzv] = i;
        *nzv = *nzv + 1;
    }
}
//--------------------------------------------------------------------
// randlc: linear congruential pseudorandom generator
//
//     x_{k+1} = a * x_k (mod 2^46)
//
// *x is the current seed (updated in place); a is the multiplier.
// Both must be odd integers representable in 46 bits.  The return
// value is the new seed normalized into (0, 1), i.e. 2^-46 * x_{k+1}.
// The 46-bit product is formed exactly in double precision by
// splitting each operand into 23-bit halves (D. H. Bailey, 1990).
//--------------------------------------------------------------------
double randlc(double *x, double a) {
    const double r23 = 1.1920928955078125e-07; // 2^-23
    const double r46 = r23 * r23;              // 2^-46
    const double t23 = 8.388608e+06;           // 2^23
    const double t46 = t23 * t23;              // 2^46

    // Split a = 2^23 * a_hi + a_lo
    double a_hi = (int) (r23 * a);
    double a_lo = a - t23 * a_hi;

    // Split *x = 2^23 * x_hi + x_lo
    double x_hi = (int) (r23 * (*x));
    double x_lo = *x - t23 * x_hi;

    // z = a_hi*x_lo + a_lo*x_hi (mod 2^23): the middle partial product
    double mid = a_hi * x_lo + a_lo * x_hi;
    double carry = (int) (r23 * mid);
    double z = mid - t23 * carry;

    // New seed: 2^23 * z + a_lo*x_lo (mod 2^46)
    double full = t23 * z + a_lo * x_lo;
    double wrap = (int) (r46 * full);
    *x = full - t46 * wrap;

    return r46 * (*x);
}
//--------------------------------------------------------------------
// vranlc: vector version of randlc.  Generates n pseudorandom numbers
// from the linear congruential recurrence
//
//     x_{k+1} = a * x_k (mod 2^46)
//
// storing the normalized results (in (0,1)) into y[0..n-1] and leaving
// the final seed in *x, so further calls continue the same sequence.
// If n <= 0 the loop body never runs and only *x's split setup occurs.
// Arithmetic is exact in double precision via 23-bit operand splitting.
//--------------------------------------------------------------------
void vranlc(int n, double *x, double a, double y[]) {
    const double r23 = 1.1920928955078125e-07; // 2^-23
    const double r46 = r23 * r23;              // 2^-46
    const double t23 = 8.388608e+06;           // 2^23
    const double t46 = t23 * t23;              // 2^46

    // Split a = 2^23 * a_hi + a_lo once; it is loop-invariant.
    double a_hi = (int) (r23 * a);
    double a_lo = a - t23 * a_hi;

    // This loop is not vectorizable: each step consumes the seed the
    // previous step produced.
    for (int i = 0; i < n; i++) {
        double x_hi = (int) (r23 * (*x));
        double x_lo = *x - t23 * x_hi;

        double mid = a_hi * x_lo + a_lo * x_hi;
        double carry = (int) (r23 * mid);
        double z = mid - t23 * carry;

        double full = t23 * z + a_lo * x_lo;
        double wrap = (int) (r46 * full);
        *x = full - t46 * wrap;

        y[i] = r46 * (*x);
    }
    return;
}
// wtime: store the wall-clock time in seconds into *t, measured
// relative to the whole second of the first call (the static anchor
// keeps the subtraction small so the double retains microsecond
// resolution).  Note: the anchor is truncated to int — assumes tv_sec
// fits an int on this platform.
void wtime(double *t) {
    static int base_sec = -1;
    struct timeval now;
    gettimeofday(&now, (void *) 0);
    if (base_sec < 0) {
        base_sec = now.tv_sec; // first call anchors the epoch
    }
    *t = (now.tv_sec - base_sec) + 1.0e-6 * now.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/*****************************************************************/
/* elapsed_time: return the current wall-clock reading from      */
/* wtime() as a double, in seconds.                              */
/*****************************************************************/
double elapsed_time() {
    double now;
    wtime(&now);
    return now;
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
// Reset the accumulated time of timer slot n (elapsed[] is a
// file-scope array defined outside this excerpt).
void timer_clear(int n) {
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
// Record the current wall-clock time as the start of timer slot n
// (start[] is a file-scope array defined outside this excerpt).
void timer_start(int n) {
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
// Stop timer slot n: add the interval since the matching
// timer_start(n) to the slot's accumulated total, so repeated
// start/stop pairs sum their durations.
void timer_stop(int n) {
double t, now;
now = elapsed_time();
t = now - start[n];
elapsed[n] += t;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
// Return the total accumulated time (seconds) of timer slot n.
double timer_read(int n) {
return (elapsed[n]);
}
// print_results: print the standard NPB completion banner — benchmark
// name, problem class, problem size, iteration count, elapsed time,
// Mop/s rate, operation type, and verification outcome.
//
//   name     benchmark name (e.g. "CG"); names starting "EP" get 2^n1
//            size formatting
//   class    problem class letter ('S', 'A', ..., or 'U' for unknown)
//   n1,n2,n3 grid dimensions; n2 == n3 == 0 means n1 alone measures size
//   niter    number of iterations performed
//   t        elapsed time in seconds
//   mops     measured Mop/s
//   optype   description of the operations counted
//   verified nonzero if verification succeeded
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified) {
    char size[16];
    int j;
    printf("\n\n %s Benchmark Completed.\n", name);
    printf(" Class = %12c\n", class);
    // If this is not a grid-based problem (EP, FT, CG), then
    // we only print n1, which contains some measure of the
    // problem size. In that case, n2 and n3 are both zero.
    // Otherwise, we print the grid size n1xn2xn3
    if((n2 == 0) && (n3 == 0)) {
        if((name[0] == 'E') && (name[1] == 'P')) {
            // EP's size is 2^n1.  snprintf bounds the write: the original
            // sprintf could overflow size[16] when 2^n1 needs more than
            // 15 characters.  The '.'-strip below handles a trailing
            // decimal point left in the fixed-width field.
            snprintf(size, sizeof size, "%15.0lf", pow(2.0, n1));
            j = 14;
            if(size[j] == '.') {
                size[j] = ' ';
                j--;
            }
            size[j + 1] = '\0';
            printf(" Size = %15s\n", size);
        }
        else {
            printf(" Size = %12d\n", n1);
        }
    }
    else {
        printf(" Size = %4dx%4dx%4d\n", n1, n2, n3);
    }
    printf(" Iterations = %12d\n", niter);
    printf(" Time in seconds = %12.2lf\n", t);
    printf(" Mop/s total = %15.2lf\n", mops);
    printf(" Operation type = %24s\n", optype);
    if(verified) printf(" Verification = %12s\n", "SUCCESSFUL");
    else printf(" Verification = %12s\n", "UNSUCCESSFUL");
}
|
GB_unaryop__minv_int16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int16_int16
// op(A') function: GB_tran__minv_int16_int16
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for all p in [0, anz): apply the MINV
// (integer multiplicative inverse) operator elementwise to an int16
// array, across nthreads OpenMP threads.  GB_CAST_OP expands to the
// int16 cast plus GB_IMINV_SIGNED (defined via the macros above).
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop__minv_int16_int16
(
int16_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply the MINV unary
// operator.  The actual loops live in the shared template
// GB_unaryop_transpose.c, included here with GB_PHASE_2_OF_2 set;
// Rowcounts, Iter, A_slice and naslice parameterize the parallel slices
// consumed by that template.  Returns GrB_NO_VALUE when this kernel is
// compiled out (GB_DISABLE).
GrB_Info GB_tran__minv_int16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_apply_op.c | //------------------------------------------------------------------------------
// GB_apply_op: typecast and apply a unary/binary/idxunop operator to an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Cx = op (A)
// Cx and A->x may be aliased.
// This function is CSR/CSC agnostic. For positional ops, A is treated as if
// it is in CSC format. The caller has already modified the op if A is in CSR
// format.
// Template/GB_positional_op_ijp can return GrB_OUT_OF_MEMORY.
// Otherwise, this function only returns GrB_SUCCESS.
#include "GB_apply.h"
#include "GB_binop.h"
#include "GB_ek_slice.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_unop__include.h"
#include "GB_binop__include.h"
#endif
#define GB_FREE_ALL \
{ \
GB_WERK_POP (A_ek_slicing, int64_t) ; \
}
GrB_Info GB_apply_op // apply a unary op, idxunop, or binop, Cx = op (A)
(
GB_void *Cx, // output array
const GrB_Type ctype, // type of C
const GB_iso_code C_code_iso, // C non-iso, or code to compute C iso value
const GB_Operator op, // unary/index-unary/binop to apply
const GrB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, C=binop(s,A), else C=binop(A,s)
bool flipij, // if true, flip i,j for user idxunop
const GrB_Matrix A, // input matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Cx != NULL) ;
ASSERT_MATRIX_OK (A, "A input for GB_apply_op", GB0) ;
ASSERT (GB_JUMBLED_OK (A)) ; // A can be jumbled
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
ASSERT (GB_IMPLIES (op != NULL, ctype == op->ztype)) ;
ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_apply_op", GB0) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
// A->x is not const since the operator might be applied in-place, if
// C is aliased to C.
GB_void *Ax = (GB_void *) A->x ; // A->x has type A->type
const int8_t *Ab = A->b ; // only if A is bitmap
const GrB_Type Atype = A->type ; // type of A->x
const int64_t anz = GB_nnz_held (A) ; // size of A->x and Cx
//--------------------------------------------------------------------------
// determine the maximum number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// get the operator
//--------------------------------------------------------------------------
GB_Opcode opcode ;
bool op_is_unop = false ;
bool op_is_binop = false ;
if (op != NULL)
{
opcode = op->opcode ;
op_is_unop = GB_IS_UNARYOP_CODE (opcode) ;
op_is_binop = GB_IS_BINARYOP_CODE (opcode) ;
}
else
{
// C is iso, with no operator to apply; just call GB_iso_unop below.
ASSERT (C_code_iso == GB_ISO_1 || // C iso value is 1
C_code_iso == GB_ISO_S || // C iso value is the scalar
C_code_iso == GB_ISO_A) ; // C iso value is the iso value of A
opcode = GB_NOP_code ;
}
//--------------------------------------------------------------------------
// apply the operator
//--------------------------------------------------------------------------
if (GB_OPCODE_IS_POSITIONAL (opcode))
{
//----------------------------------------------------------------------
// built-in positional unary, index_unary, or binary operator
//----------------------------------------------------------------------
bool is64 = (op->ztype == GrB_INT64) ;
bool is32 = (op->ztype == GrB_INT32) ;
ASSERT_OP_OK (op, "positional unop/idxunop/binop: GB_apply_op", GB0) ;
// get A and C
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ai = A->i ;
int64_t anvec = A->nvec ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
//----------------------------------------------------------------------
// determine number of threads to use
//----------------------------------------------------------------------
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
//----------------------------------------------------------------------
// Cx = positional_op (A)
//----------------------------------------------------------------------
int64_t thunk = GB_positional_offset (opcode, scalar) ;
// GB_positional_op_ijp allocates a set of tasks, which can possibly
// fail if out of memory.
if (is64)
{
//------------------------------------------------------------------
// int64 Cx = positional_op (A)
//------------------------------------------------------------------
int64_t *restrict Cz = (int64_t *) Cx ;
switch (opcode)
{
case GB_POSITIONI_unop_code : // z = position_i(A(i,j)) == i
case GB_POSITIONI1_unop_code : // z = position_i1(A(i,j)) == i+1
case GB_FIRSTI_binop_code : // z = first_i(A(i,j),y) == i
case GB_FIRSTI1_binop_code : // z = first_i1(A(i,j),y) == i+1
case GB_SECONDI_binop_code : // z = second_i(x,A(i,j)) == i
case GB_SECONDI1_binop_code : // z = second_i1(x,A(i,j)) == i+1
case GB_ROWINDEX_idxunop_code : // z = i+thunk
#define GB_APPLY(p) \
Cz [p] = (i + thunk) ;
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_POSITIONJ_unop_code : // z = position_j(A(i,j)) == j
case GB_POSITIONJ1_unop_code : // z = position_j1(A(i,j)) == j+1
case GB_FIRSTJ_binop_code : // z = first_j(A(i,j),y) == j
case GB_FIRSTJ1_binop_code : // z = first_j1(A(i,j),y) == j+1
case GB_SECONDJ_binop_code : // z = second_j(x,A(i,j)) == j
case GB_SECONDJ1_binop_code : // z = second_j1(x,A(i,j)) == j+1
case GB_COLINDEX_idxunop_code : // z = j+thunk
#define GB_APPLY(p) \
Cz [p] = (j + thunk) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_DIAGINDEX_idxunop_code : // z = (j-(i+thunk)
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j - (i+thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_FLIPDIAGINDEX_idxunop_code : // z = (i-(j+thunk)
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (i - (j+thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
default: ;
}
}
else if (is32)
{
//------------------------------------------------------------------
// int32 Cx = positional_op (A)
//------------------------------------------------------------------
int32_t *restrict Cz = (int32_t *) Cx ;
switch (opcode)
{
case GB_POSITIONI_unop_code : // z = position_i(A(i,j)) == i
case GB_POSITIONI1_unop_code : // z = position_i1(A(i,j)) == i+1
case GB_FIRSTI_binop_code : // z = first_i(A(i,j),y) == i
case GB_FIRSTI1_binop_code : // z = first_i1(A(i,j),y) == i+1
case GB_SECONDI_binop_code : // z = second_i(x,A(i,j)) == i
case GB_SECONDI1_binop_code : // z = second_i1(x,A(i,j)) == i+1
case GB_ROWINDEX_idxunop_code : // z = i+thunk
#define GB_APPLY(p) \
Cz [p] = (int32_t) (i + thunk) ;
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_POSITIONJ_unop_code : // z = position_j(A(i,j)) == j
case GB_POSITIONJ1_unop_code : // z = position_j1(A(i,j)) == j+1
case GB_FIRSTJ_binop_code : // z = first_j(A(i,j),y) == j
case GB_FIRSTJ1_binop_code : // z = first_j1(A(i,j),y) == j+1
case GB_SECONDJ_binop_code : // z = second_j(x,A(i,j)) == j
case GB_SECONDJ1_binop_code : // z = second_j1(x,A(i,j)) == j+1
case GB_COLINDEX_idxunop_code : // z = j+thunk
#define GB_APPLY(p) \
Cz [p] = (int32_t) (j + thunk) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_DIAGINDEX_idxunop_code : // z = (j-(i+thunk)
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (int32_t) (j - (i+thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_FLIPDIAGINDEX_idxunop_code : // z = (i-(j+thunk)
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (int32_t) (i - (j+thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
default: ;
}
}
else
{
//------------------------------------------------------------------
// bool Cx = positional_op (A)
//------------------------------------------------------------------
ASSERT (op->ztype == GrB_BOOL) ;
bool *restrict Cz = (bool *) Cx ;
switch (opcode)
{
case GB_TRIL_idxunop_code : // z = (j <= (i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j <= (i + thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_TRIU_idxunop_code : // z = (j >= (i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j >= (i + thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_DIAG_idxunop_code : // z = (j == (i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j == (i + thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_OFFDIAG_idxunop_code : // z = (j != (i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j != (i + thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_COLLE_idxunop_code : // z = (j <= thunk)
#define GB_APPLY(p) \
Cz [p] = (j <= thunk) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_COLGT_idxunop_code : // z = (j > thunk)
#define GB_APPLY(p) \
Cz [p] = (j > thunk) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_ROWLE_idxunop_code : // z = (i <= thunk)
#define GB_APPLY(p) \
Cz [p] = (i <= thunk) ;
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_ROWGT_idxunop_code : // z = (i > thunk)
#define GB_APPLY(p) \
Cz [p] = (i > thunk) ;
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
default: ;
}
}
}
else if (C_code_iso != GB_NON_ISO)
{
//----------------------------------------------------------------------
// apply the unary or binary operator to the iso value
//----------------------------------------------------------------------
// if C is iso, this function takes O(1) time
GBURBLE ("(iso apply) ") ;
ASSERT_MATRIX_OK (A, "A passing to GB_iso_unop", GB0) ;
if (anz > 0)
{
// Cx [0] = unop (A), binop (scalar,A), or binop (A,scalar)
GB_iso_unop (Cx, ctype, C_code_iso, op, A, scalar) ;
}
}
else if (op_is_unop)
{
//----------------------------------------------------------------------
// apply the unary operator to all entries
//----------------------------------------------------------------------
ASSERT_UNARYOP_OK (op, "unop for GB_apply_op", GB0) ;
ASSERT (!A->iso) ;
// determine number of threads to use
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
#ifndef GBCOMPACT
if (Atype == op->xtype || opcode == GB_IDENTITY_unop_code)
{
// The switch factory is used if the op is IDENTITY, or if no
// typecasting is being done. IDENTITY operator can do arbitrary
// typecasting (it is not used if no typecasting is done).
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_unop_apply(unop,zname,aname) \
GB (_unop_apply_ ## unop ## zname ## aname)
#define GB_WORKER(unop,zname,ztype,aname,atype) \
{ \
if (GB_unop_apply (unop,zname,aname) ((ztype *) Cx, \
(const atype *) Ax, Ab, anz, nthreads) \
== GrB_SUCCESS) return (GrB_SUCCESS) ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
#include "GB_unop_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a unary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "(generic apply: %s) ", op->name) ;
size_t asize = Atype->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
GB_Type_code acode = Atype->code ;
GB_Type_code xcode = op->xtype->code ;
GB_cast_function cast_A_to_X = GB_cast_factory (xcode, acode) ;
GxB_unary_function fop = op->unop_function ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p)*asize, asize) ;
// Cx [p] = fop (xwork)
fop (Cx +(p*zsize), xwork) ;
}
}
else if (op_is_binop)
{
//----------------------------------------------------------------------
// apply a binary operator (bound to a scalar)
//----------------------------------------------------------------------
ASSERT_BINARYOP_OK (op, "standard binop for GB_apply_op", GB0) ;
ASSERT_SCALAR_OK (scalar, "scalar for GB_apply_op", GB0) ;
GB_Type_code xcode, ycode, zcode ;
ASSERT (opcode != GB_FIRST_binop_code) ;
ASSERT (opcode != GB_SECOND_binop_code) ;
ASSERT (opcode != GB_PAIR_binop_code) ;
ASSERT (opcode != GB_ANY_binop_code) ;
size_t asize = Atype->size ;
size_t ssize = scalar->type->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
size_t ysize = op->ytype->size ;
GB_Type_code scode = scalar->type->code ;
xcode = op->xtype->code ;
ycode = op->ytype->code ;
// typecast the scalar to the operator input
size_t ssize_cast ;
GB_Type_code scode_cast ;
if (binop_bind1st)
{
ssize_cast = xsize ;
scode_cast = xcode ;
}
else
{
ssize_cast = ysize ;
scode_cast = ycode ;
}
GB_void swork [GB_VLA(ssize_cast)] ;
GB_void *scalarx = (GB_void *) scalar->x ;
if (scode_cast != scode)
{
// typecast the scalar to the operator input, in swork
GB_cast_function cast_s = GB_cast_factory (scode_cast, scode) ;
cast_s (swork, scalar->x, ssize) ;
scalarx = swork ;
}
// determine number of threads to use
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
#ifndef GBCOMPACT
if (binop_bind1st)
{
//------------------------------------------------------------------
// z = binop (scalar,Ax)
//------------------------------------------------------------------
if (GB_binop_builtin (op->xtype, false, Atype, false,
(GrB_BinaryOp) op, false, &opcode, &xcode, &ycode, &zcode))
{
//--------------------------------------------------------------
// define the worker for the switch factory
//--------------------------------------------------------------
#define GB_bind1st(binop,xname) GB (_bind1st_ ## binop ## xname)
#define GB_BINOP_WORKER(binop,xname) \
{ \
if (GB_bind1st (binop, xname) (Cx, scalarx, Ax, \
Ab, anz, nthreads) == GrB_SUCCESS) \
return (GrB_SUCCESS) ; \
} \
break ;
//--------------------------------------------------------------
// launch the switch factory
//--------------------------------------------------------------
#define GB_NO_FIRST
#define GB_NO_SECOND
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
else
{
//------------------------------------------------------------------
// z = binop (Ax,scalar)
//------------------------------------------------------------------
if (GB_binop_builtin (Atype, false, op->ytype, false,
(GrB_BinaryOp) op, false, &opcode, &xcode, &ycode, &zcode))
{
//--------------------------------------------------------------
// define the worker for the switch factory
//--------------------------------------------------------------
#define GB_bind2nd(binop,xname) GB (_bind2nd_ ## binop ## xname)
#undef GB_BINOP_WORKER
#define GB_BINOP_WORKER(binop,xname) \
{ \
if (GB_bind2nd (binop, xname) (Cx, Ax, scalarx, \
Ab, anz, nthreads) == GrB_SUCCESS) \
return (GrB_SUCCESS) ; \
} \
break ;
//--------------------------------------------------------------
// launch the switch factory
//--------------------------------------------------------------
#define GB_NO_FIRST
#define GB_NO_SECOND
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a binary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "(generic apply: %s) ", op->name) ;
GB_Type_code acode = Atype->code ;
GxB_binary_function fop = op->binop_function ;
ASSERT (!A->iso) ;
if (binop_bind1st)
{
// Cx = binop (scalar,Ax)
GB_cast_function cast_A_to_Y = GB_cast_factory (ycode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// ywork = (ytype) Ax [p]
GB_void ywork [GB_VLA(ysize)] ;
cast_A_to_Y (ywork, Ax +(p)*asize, asize) ;
// Cx [p] = fop (scalarx, ywork)
fop (Cx +((p)*zsize), scalarx, ywork) ;
}
}
else
{
// Cx = binop (Ax,scalar)
GB_cast_function cast_A_to_X = GB_cast_factory (xcode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p)*asize, asize) ;
// Cx [p] = fop (xwork, scalarx)
fop (Cx +(p*zsize), xwork, scalarx) ;
}
}
}
else
{
//----------------------------------------------------------------------
// apply a user-defined index_unary op
//----------------------------------------------------------------------
// All valued GrB_IndexUnaryOps (GrB_VALUE*) have already been renamed
// to their corresponding binary op (GrB_VALUEEQ_FP32 became
// GrB_EQ_FP32, for example). The only remaining index unary ops are
// positional, and user-defined. Positional ops have been handled
// above, so only user-defined index unary ops are left.
// get A and C
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ai = A->i ;
int64_t anvec = A->nvec ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
ASSERT (opcode == GB_USER_idxunop_code) ;
GxB_index_unary_function fop = op->idxunop_function ;
size_t asize = Atype->size ;
size_t ssize = scalar->type->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
size_t ysize = op->ytype->size ;
GB_Type_code scode = scalar->type->code ;
GB_Type_code acode = Atype->code ;
GB_Type_code xcode = op->xtype->code ;
GB_Type_code ycode = op->ytype->code ;
GB_cast_function cast_A_to_X = GB_cast_factory (xcode, acode) ;
GB_void ywork [GB_VLA(ysize)] ;
GB_void *ythunk = (GB_void *) scalar->x ;
if (ycode != scode)
{
// typecast the scalar to the operator input, in ywork
GB_cast_function cast_s = GB_cast_factory (ycode, scode) ;
cast_s (ywork, scalar->x, ssize) ;
ythunk = ywork ;
}
#define GB_APPLY(p) \
if (!GBB (Ab, p)) continue ; \
int64_t i = GBI (Ai, p, avlen) ; \
GB_void xwork [GB_VLA(xsize)] ; \
cast_A_to_X (xwork, Ax +(p)*asize, asize) ; \
fop (Cx +(p*zsize), xwork, \
flipij ? j : i, flipij ? i : j, ythunk) ;
#include "GB_positional_op_ijp.c"
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
}
|
wfpt.c | /* Generated by Cython 0.15.1+ on Mon Nov 5 12:02:37 2012 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02040000
#error Cython requires Python 2.4+.
#else
#include <stddef.h> /* For offsetof */
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#define PY_FORMAT_SIZE_T ""
#define PyInt_FromSsize_t(z) PyInt_FromLong(z)
#define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o)
#define PyNumber_Index(o) PyNumber_Int(o)
#define PyIndex_Check(o) PyNumber_Check(o)
#define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
#define PyVarObject_HEAD_INIT(type, size) \
PyObject_HEAD_INIT(type) size,
#define PyType_Modified(t)
/* Compatibility shim: a stand-in for CPython's Py_buffer, compiled only
 * when PY_VERSION_HEX < 0x02060000 (see the enclosing #if), i.e. for
 * Pythons that predate the PEP 3118 buffer protocol.  The field layout
 * mirrors the struct introduced in Python 2.6. */
typedef struct {
void *buf;                /* pointer to the exported memory */
PyObject *obj;            /* exporting object (owner of the memory) */
Py_ssize_t len;           /* total buffer length in bytes */
Py_ssize_t itemsize;      /* size in bytes of one element */
int readonly;             /* nonzero if the memory is read-only */
int ndim;                 /* number of dimensions */
char *format;             /* struct-module style element format string */
Py_ssize_t *shape;        /* extent of each of the ndim dimensions */
Py_ssize_t *strides;      /* byte stride for each dimension */
Py_ssize_t *suboffsets;   /* per-dimension suboffsets for indirect buffers */
void *internal;           /* reserved for the exporting object's own use */
} Py_buffer;
#define PyBUF_SIMPLE 0
#define PyBUF_WRITABLE 0x0001
#define PyBUF_FORMAT 0x0004
#define PyBUF_ND 0x0008
#define PyBUF_STRIDES (0x0010 | PyBUF_ND)
#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE)
#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE)
#endif
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6
#define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict")
#endif
#if PY_MAJOR_VERSION >= 3
#define Py_TPFLAGS_CHECKTYPES 0
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_GET_LENGTH)
#define CYTHON_PEP393_ENABLED
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#else
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_VERSION_HEX < 0x02060000
#define PyBytesObject PyStringObject
#define PyBytes_Type PyString_Type
#define PyBytes_Check PyString_Check
#define PyBytes_CheckExact PyString_CheckExact
#define PyBytes_FromString PyString_FromString
#define PyBytes_FromStringAndSize PyString_FromStringAndSize
#define PyBytes_FromFormat PyString_FromFormat
#define PyBytes_DecodeEscape PyString_DecodeEscape
#define PyBytes_AsString PyString_AsString
#define PyBytes_AsStringAndSize PyString_AsStringAndSize
#define PyBytes_Size PyString_Size
#define PyBytes_AS_STRING PyString_AS_STRING
#define PyBytes_GET_SIZE PyString_GET_SIZE
#define PyBytes_Repr PyString_Repr
#define PyBytes_Concat PyString_Concat
#define PyBytes_ConcatAndDel PyString_ConcatAndDel
#endif
#if PY_VERSION_HEX < 0x02060000
#define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
#define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_VERSION_HEX < 0x03020000
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
#define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
#define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
#define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
#else
#define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
#define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
#define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
#endif
#if PY_MAJOR_VERSION >= 3
#define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
#else
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_NAMESTR(n) ((char *)(n))
#define __Pyx_DOCSTR(n) ((char *)(n))
#else
#define __Pyx_NAMESTR(n) (n)
#define __Pyx_DOCSTR(n) (n)
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#define __PYX_HAVE__wfpt
#define __PYX_HAVE_API__wfpt
#include "stdio.h"
#include "stdlib.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "math.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
/* inline attribute */
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
/* unused attribute */
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
/* Type Conversion Predeclarations */
#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s)
#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s))
#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#ifdef __GNUC__
/* Test for GCC > 2.95 */
#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* __GNUC__ > 2 ... */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ > 2 ... */
#else /* __GNUC__ */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"wfpt.pyx",
"pdf.pxi",
"numpy.pxd",
"integrate.pxi",
};
/* "numpy.pxd":722
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "numpy.pxd":723
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "numpy.pxd":724
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "numpy.pxd":725
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "numpy.pxd":729
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "numpy.pxd":730
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "numpy.pxd":731
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "numpy.pxd":732
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "numpy.pxd":736
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "numpy.pxd":737
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "numpy.pxd":746
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "numpy.pxd":747
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "numpy.pxd":748
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "numpy.pxd":750
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "numpy.pxd":751
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "numpy.pxd":752
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "numpy.pxd":754
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "numpy.pxd":755
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "numpy.pxd":757
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "numpy.pxd":758
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "numpy.pxd":759
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
/*--- Type declarations ---*/
/* "numpy.pxd":761
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "numpy.pxd":762
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "numpy.pxd":763
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "numpy.pxd":765
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
struct __pyx_opt_args_4wfpt_full_pdf;
/* "/home/wiecki/working/projects/hddm/src/pdf.pxi":98
* return exp(log(p) + ((a*z*sv)**2 - 2*a*v*z - (v**2)*x)/(2*(sv**2)*x+2))/sqrt((sv**2)*x+1)/(a**2)
*
* cpdef double full_pdf(double x, double v, double sv, double a, double # <<<<<<<<<<<<<<
* z, double sz, double t, double st, double err, int
* n_st=2, int n_sz=2, bint use_adaptive=1, double
*/
/* Optional-argument bundle for the cpdef function full_pdf() whose
 * signature is quoted in the comment above (defaults: n_st=2, n_sz=2,
 * use_adaptive=1).  Cython packs a cpdef function's trailing optional
 * parameters into one struct so C-level callers can supply any prefix
 * of them. */
struct __pyx_opt_args_4wfpt_full_pdf {
int __pyx_n;        /* count of optional fields explicitly supplied (Cython convention) */
int n_st;           /* default 2, per the quoted .pyx signature */
int n_sz;           /* default 2, per the quoted .pyx signature */
int use_adaptive;   /* declared bint in the .pyx source; default 1 */
double simps_err;   /* presumably a Simpson-integration error tolerance — TODO confirm in pdf.pxi */
};
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif /* CYTHON_REFNANNY */
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
const char* function_name); /*proto*/
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact); /*proto*/
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name; /* for error messages only */
struct __Pyx_StructField_* fields;
size_t size; /* sizeof(type) */
size_t arraysize[8]; /* length of array in each dimension */
int ndim;
char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
static void __Pyx_RaiseBufferFallbackError(void); /*proto*/
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#include <string.h>
void __pyx_init_nan(void);
static float __PYX_NAN;
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
static CYTHON_INLINE void __Pyx_RaiseNoneIndexingError(void);
#if PY_MAJOR_VERSION >= 3
/* Py3-only dict lookup helper: unlike PyDict_GetItem, raises KeyError for a
 * missing key (or propagates an error raised during hashing/comparison) and
 * returns a NEW reference on success; rejects indexing into None. */
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
    PyObject *result;
    /* Subscripting None is reported via the shared error helper. */
    if (unlikely(d == Py_None)) {
        __Pyx_RaiseNoneIndexingError();
        return NULL;
    }
    result = PyDict_GetItemWithError(d, key);
    if (unlikely(result == NULL)) {
        /* Distinguish "key absent" from "lookup itself raised". */
        if (!PyErr_Occurred())
            PyErr_SetObject(PyExc_KeyError, key);
        return NULL;
    }
    Py_INCREF(result);  /* GetItemWithError returns a borrowed reference */
    return result;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
/* Generic fallback for o[j]: uses the full mapping/sequence protocol.
 * Consumes (decrefs) the index object j; a NULL j means the index
 * conversion already failed, so just propagate the error. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *item;
    if (!j)
        return NULL;
    item = PyObject_GetItem(o, j);
    Py_DECREF(j);
    return item;
}
#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
__Pyx_GetItemInt_List_Fast(o, i) : \
__Pyx_GetItemInt_Generic(o, to_py_func(i)))
/* Fast path for list indexing: bounds-checked PyList_GET_ITEM with
 * Python-style negative-index support; anything out of range (or a None
 * receiver) falls back to the generic protocol, which raises properly. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) {
    if (likely(o != Py_None)) {
        Py_ssize_t size = PyList_GET_SIZE(o);
        Py_ssize_t idx = (i < 0) ? (size + i) : i;  /* normalize negatives */
        if ((idx >= 0) & (idx < size)) {
            PyObject *item = PyList_GET_ITEM(o, idx);
            Py_INCREF(item);  /* GET_ITEM returns a borrowed reference */
            return item;
        }
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
__Pyx_GetItemInt_Tuple_Fast(o, i) : \
__Pyx_GetItemInt_Generic(o, to_py_func(i)))
/* Fast path for tuple indexing: bounds-checked PyTuple_GET_ITEM with
 * Python-style negative-index support; anything out of range (or a None
 * receiver) falls back to the generic protocol, which raises properly. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) {
    if (likely(o != Py_None)) {
        Py_ssize_t size = PyTuple_GET_SIZE(o);
        Py_ssize_t idx = (i < 0) ? (size + i) : i;  /* normalize negatives */
        if ((idx >= 0) & (idx < size)) {
            PyObject *item = PyTuple_GET_ITEM(o, idx);
            Py_INCREF(item);  /* GET_ITEM returns a borrowed reference */
            return item;
        }
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
__Pyx_GetItemInt_Fast(o, i) : \
__Pyx_GetItemInt_Generic(o, to_py_func(i)))
/* Dispatch o[i] to the fastest applicable path: exact list, exact tuple,
 * the type's own sq_item slot for non-negative indices, and finally the
 * fully generic protocol (which also produces the proper exception). */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) {
    if (PyList_CheckExact(o)) {
        Py_ssize_t idx = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if (likely((idx >= 0) & (idx < PyList_GET_SIZE(o)))) {
            PyObject *item = PyList_GET_ITEM(o, idx);
            Py_INCREF(item);  /* borrowed -> new reference */
            return item;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t idx = (likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if (likely((idx >= 0) & (idx < PyTuple_GET_SIZE(o)))) {
            PyObject *item = PyTuple_GET_ITEM(o, idx);
            Py_INCREF(item);  /* borrowed -> new reference */
            return item;
        }
    }
    else if (likely(i >= 0)) {
        /* sq_item implements its own bounds handling and error raising. */
        PySequenceMethods *sq = Py_TYPE(o)->tp_as_sequence;
        if (likely(sq && sq->sq_item))
            return sq->sq_item(o, i);
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/
static CYTHON_INLINE void __Pyx_RaiseImportError(PyObject *name);
static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t);
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eqf(a, b) ((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *);
static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *);
static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *);
static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *);
static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *);
static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *);
static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *);
static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *);
static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *);
static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *);
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *);
static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *);
static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *);
static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *);
static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *);
static int __Pyx_check_binary_version(void);
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/
static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
typedef struct {
int code_line;
PyCodeObject* code_object;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static void __pyx_clear_code_object_cache(void);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
int __pyx_lineno, const char *__pyx_filename); /*proto*/
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'cython' */
/* Module declarations from 'wfpt' */
static double __pyx_f_4wfpt_ftt_01w(double, double, double); /*proto*/
static double __pyx_f_4wfpt_pdf(double, double, double, double, double); /*proto*/
static double __pyx_f_4wfpt_pdf_sv(double, double, double, double, double, double); /*proto*/
static double __pyx_f_4wfpt_full_pdf(double, double, double, double, double, double, double, double, double, int __pyx_skip_dispatch, struct __pyx_opt_args_4wfpt_full_pdf *__pyx_optional_args); /*proto*/
static double __pyx_f_4wfpt_simpson_1D(double, double, double, double, double, double, double, double, double, int, double, double, int); /*proto*/
static double __pyx_f_4wfpt_simpson_2D(double, double, double, double, double, double, double, double, double, int, double, double, int); /*proto*/
static double __pyx_f_4wfpt_adaptiveSimpsonsAux(double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, int); /*proto*/
static double __pyx_f_4wfpt_adaptiveSimpsons_1D(double, double, double, double, double, double, double, double, double, double, double, double, int); /*proto*/
static double __pyx_f_4wfpt_adaptiveSimpsonsAux_2D(double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, int, int); /*proto*/
static double __pyx_f_4wfpt_adaptiveSimpsons_2D(double, double, double, double, double, double, double, double, double, double, double, double, int, int); /*proto*/
static CYTHON_INLINE int __pyx_f_4wfpt_p_outlier_in_range(double); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, 'I', IS_UNSIGNED(int), 0 };
#define __Pyx_MODULE_NAME "wfpt"
int __pyx_module_is_main_wfpt = 0;
/* Implementation of 'wfpt' */
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_RuntimeError;
static PyObject *__pyx_pf_4wfpt_full_pdf(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_err, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err); /* proto */
static PyObject *__pyx_pf_4wfpt_2pdf_array(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_err, int __pyx_v_logp, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err, double __pyx_v_p_outlier, double __pyx_v_w_outlier); /* proto */
static PyObject *__pyx_pf_4wfpt_4wiener_like(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_err, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err, double __pyx_v_p_outlier, double __pyx_v_w_outlier); /* proto */
static PyObject *__pyx_pf_4wfpt_6wiener_like_multi(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, PyObject *__pyx_v_v, PyObject *__pyx_v_sv, PyObject *__pyx_v_a, PyObject *__pyx_v_z, PyObject *__pyx_v_sz, PyObject *__pyx_v_t, PyObject *__pyx_v_st, double __pyx_v_err, PyObject *__pyx_v_multi, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err, double __pyx_v_p_outlier, double __pyx_v_w_outlier); /* proto */
static PyObject *__pyx_pf_4wfpt_8gen_rts_from_cdf(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, int __pyx_v_samples, double __pyx_v_cdf_lb, double __pyx_v_cdf_ub, double __pyx_v_dt); /* proto */
static PyObject *__pyx_pf_4wfpt_10wiener_like_contaminant(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_cont_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_t_min, double __pyx_v_t_max, double __pyx_v_err, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err); /* proto */
static PyObject *__pyx_pf_4wfpt_12gen_cdf_using_pdf(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_err, int __pyx_v_N, double __pyx_v_time, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err, double __pyx_v_p_outlier, double __pyx_v_w_outlier); /* proto */
static PyObject *__pyx_pf_4wfpt_14split_cdf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_data); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static char __pyx_k_1[] = "at least one of the parameters is out of the support";
static char __pyx_k_5[] = "ndarray is not C contiguous";
static char __pyx_k_7[] = "ndarray is not Fortran contiguous";
static char __pyx_k_9[] = "Non-native byte order not supported";
static char __pyx_k_11[] = "unknown dtype code in numpy.pxd (%d)";
static char __pyx_k_12[] = "Format string allocated too short, see comment in numpy.pxd";
static char __pyx_k_15[] = "Format string allocated too short.";
static char __pyx_k_17[] = "scipy.integrate";
static char __pyx_k_18[] = "*";
static char __pyx_k_21[] = "/home/wiecki/working/projects/hddm/src/wfpt.pyx";
static char __pyx_k_30[] = "wiener_like_contaminant";
static char __pyx_k__B[] = "B";
static char __pyx_k__H[] = "H";
static char __pyx_k__I[] = "I";
static char __pyx_k__L[] = "L";
static char __pyx_k__N[] = "N";
static char __pyx_k__O[] = "O";
static char __pyx_k__Q[] = "Q";
static char __pyx_k__a[] = "a";
static char __pyx_k__b[] = "b";
static char __pyx_k__d[] = "d";
static char __pyx_k__f[] = "f";
static char __pyx_k__g[] = "g";
static char __pyx_k__h[] = "h";
static char __pyx_k__i[] = "i";
static char __pyx_k__j[] = "j";
static char __pyx_k__l[] = "l";
static char __pyx_k__p[] = "p";
static char __pyx_k__q[] = "q";
static char __pyx_k__t[] = "t";
static char __pyx_k__v[] = "v";
static char __pyx_k__x[] = "x";
static char __pyx_k__y[] = "y";
static char __pyx_k__z[] = "z";
static char __pyx_k__Zd[] = "Zd";
static char __pyx_k__Zf[] = "Zf";
static char __pyx_k__Zg[] = "Zg";
static char __pyx_k__dt[] = "dt";
static char __pyx_k__lb[] = "lb";
static char __pyx_k__np[] = "np";
static char __pyx_k__rt[] = "rt";
static char __pyx_k__st[] = "st";
static char __pyx_k__sv[] = "sv";
static char __pyx_k__sz[] = "sz";
static char __pyx_k__ub[] = "ub";
static char __pyx_k__err[] = "err";
static char __pyx_k__idx[] = "idx";
static char __pyx_k__inf[] = "inf";
static char __pyx_k__log[] = "log";
static char __pyx_k__pdf[] = "pdf";
static char __pyx_k__rts[] = "rts";
static char __pyx_k__sum[] = "sum";
static char __pyx_k__copy[] = "copy";
static char __pyx_k__data[] = "data";
static char __pyx_k__diff[] = "diff";
static char __pyx_k__hddm[] = "hddm";
static char __pyx_k__logp[] = "logp";
static char __pyx_k__n_st[] = "n_st";
static char __pyx_k__n_sz[] = "n_sz";
static char __pyx_k__rand[] = "rand";
static char __pyx_k__sign[] = "sign";
static char __pyx_k__size[] = "size";
static char __pyx_k__time[] = "time";
static char __pyx_k__wfpt[] = "wfpt";
static char __pyx_k__x_lb[] = "x_lb";
static char __pyx_k__x_ub[] = "x_ub";
static char __pyx_k__array[] = "array";
static char __pyx_k__delay[] = "delay";
static char __pyx_k__dtype[] = "dtype";
static char __pyx_k__empty[] = "empty";
static char __pyx_k__l_cdf[] = "l_cdf";
static char __pyx_k__multi[] = "multi";
static char __pyx_k__numpy[] = "numpy";
static char __pyx_k__param[] = "param";
static char __pyx_k__range[] = "range";
static char __pyx_k__t_max[] = "t_max";
static char __pyx_k__t_min[] = "t_min";
static char __pyx_k__arange[] = "arange";
static char __pyx_k__cdf_lb[] = "cdf_lb";
static char __pyx_k__cdf_ub[] = "cdf_ub";
static char __pyx_k__cont_x[] = "cont_x";
static char __pyx_k__cumsum[] = "cumsum";
static char __pyx_k__double[] = "double";
static char __pyx_k__n_cont[] = "n_cont";
static char __pyx_k__params[] = "params";
static char __pyx_k__random[] = "random";
static char __pyx_k__samples[] = "samples";
static char __pyx_k____main__[] = "__main__";
static char __pyx_k____test__[] = "__test__";
static char __pyx_k__cumtrapz[] = "cumtrapz";
static char __pyx_k__linspace[] = "linspace";
static char __pyx_k__pos_cont[] = "pos_cont";
static char __pyx_k__sum_logp[] = "sum_logp";
static char __pyx_k__cdf_array[] = "cdf_array";
static char __pyx_k__integrate[] = "integrate";
static char __pyx_k__p_outlier[] = "p_outlier";
static char __pyx_k__pdf_array[] = "pdf_array";
static char __pyx_k__simps_err[] = "simps_err";
static char __pyx_k__split_cdf[] = "split_cdf";
static char __pyx_k__w_outlier[] = "w_outlier";
static char __pyx_k__ValueError[] = "ValueError";
static char __pyx_k__wp_outlier[] = "wp_outlier";
static char __pyx_k__concatenate[] = "concatenate";
static char __pyx_k__params_iter[] = "params_iter";
static char __pyx_k__wiener_like[] = "wiener_like";
static char __pyx_k__RuntimeError[] = "RuntimeError";
static char __pyx_k__searchsorted[] = "searchsorted";
static char __pyx_k__use_adaptive[] = "use_adaptive";
static char __pyx_k__gen_rts_from_cdf[] = "gen_rts_from_cdf";
static char __pyx_k__gen_cdf_using_pdf[] = "gen_cdf_using_pdf";
static char __pyx_k__wiener_like_multi[] = "wiener_like_multi";
static PyObject *__pyx_kp_s_1;
static PyObject *__pyx_kp_u_11;
static PyObject *__pyx_kp_u_12;
static PyObject *__pyx_kp_u_15;
static PyObject *__pyx_n_s_17;
static PyObject *__pyx_n_s_18;
static PyObject *__pyx_kp_s_21;
static PyObject *__pyx_n_s_30;
static PyObject *__pyx_kp_u_5;
static PyObject *__pyx_kp_u_7;
static PyObject *__pyx_kp_u_9;
static PyObject *__pyx_n_s__N;
static PyObject *__pyx_n_s__RuntimeError;
static PyObject *__pyx_n_s__ValueError;
static PyObject *__pyx_n_s____main__;
static PyObject *__pyx_n_s____test__;
static PyObject *__pyx_n_s__a;
static PyObject *__pyx_n_s__arange;
static PyObject *__pyx_n_s__array;
static PyObject *__pyx_n_s__cdf_array;
static PyObject *__pyx_n_s__cdf_lb;
static PyObject *__pyx_n_s__cdf_ub;
static PyObject *__pyx_n_s__concatenate;
static PyObject *__pyx_n_s__cont_x;
static PyObject *__pyx_n_s__copy;
static PyObject *__pyx_n_s__cumsum;
static PyObject *__pyx_n_s__cumtrapz;
static PyObject *__pyx_n_s__data;
static PyObject *__pyx_n_s__delay;
static PyObject *__pyx_n_s__diff;
static PyObject *__pyx_n_s__double;
static PyObject *__pyx_n_s__dt;
static PyObject *__pyx_n_s__dtype;
static PyObject *__pyx_n_s__empty;
static PyObject *__pyx_n_s__err;
static PyObject *__pyx_n_s__f;
static PyObject *__pyx_n_s__gen_cdf_using_pdf;
static PyObject *__pyx_n_s__gen_rts_from_cdf;
static PyObject *__pyx_n_s__hddm;
static PyObject *__pyx_n_s__i;
static PyObject *__pyx_n_s__idx;
static PyObject *__pyx_n_s__inf;
static PyObject *__pyx_n_s__integrate;
static PyObject *__pyx_n_s__j;
static PyObject *__pyx_n_s__l_cdf;
static PyObject *__pyx_n_s__lb;
static PyObject *__pyx_n_s__linspace;
static PyObject *__pyx_n_s__log;
static PyObject *__pyx_n_s__logp;
static PyObject *__pyx_n_s__multi;
static PyObject *__pyx_n_s__n_cont;
static PyObject *__pyx_n_s__n_st;
static PyObject *__pyx_n_s__n_sz;
static PyObject *__pyx_n_s__np;
static PyObject *__pyx_n_s__numpy;
static PyObject *__pyx_n_s__p;
static PyObject *__pyx_n_s__p_outlier;
static PyObject *__pyx_n_s__param;
static PyObject *__pyx_n_s__params;
static PyObject *__pyx_n_s__params_iter;
static PyObject *__pyx_n_s__pdf;
static PyObject *__pyx_n_s__pdf_array;
static PyObject *__pyx_n_s__pos_cont;
static PyObject *__pyx_n_s__rand;
static PyObject *__pyx_n_s__random;
static PyObject *__pyx_n_s__range;
static PyObject *__pyx_n_s__rt;
static PyObject *__pyx_n_s__rts;
static PyObject *__pyx_n_s__samples;
static PyObject *__pyx_n_s__searchsorted;
static PyObject *__pyx_n_s__sign;
static PyObject *__pyx_n_s__simps_err;
static PyObject *__pyx_n_s__size;
static PyObject *__pyx_n_s__split_cdf;
static PyObject *__pyx_n_s__st;
static PyObject *__pyx_n_s__sum;
static PyObject *__pyx_n_s__sum_logp;
static PyObject *__pyx_n_s__sv;
static PyObject *__pyx_n_s__sz;
static PyObject *__pyx_n_s__t;
static PyObject *__pyx_n_s__t_max;
static PyObject *__pyx_n_s__t_min;
static PyObject *__pyx_n_s__time;
static PyObject *__pyx_n_s__ub;
static PyObject *__pyx_n_s__use_adaptive;
static PyObject *__pyx_n_s__v;
static PyObject *__pyx_n_s__w_outlier;
static PyObject *__pyx_n_s__wfpt;
static PyObject *__pyx_n_s__wiener_like;
static PyObject *__pyx_n_s__wiener_like_multi;
static PyObject *__pyx_n_s__wp_outlier;
static PyObject *__pyx_n_s__x;
static PyObject *__pyx_n_s__x_lb;
static PyObject *__pyx_n_s__x_ub;
static PyObject *__pyx_n_s__y;
static PyObject *__pyx_n_s__z;
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_int_15;
static PyObject *__pyx_k_slice_3;
static PyObject *__pyx_k_slice_4;
static PyObject *__pyx_k_tuple_2;
static PyObject *__pyx_k_tuple_6;
static PyObject *__pyx_k_tuple_8;
static PyObject *__pyx_k_tuple_10;
static PyObject *__pyx_k_tuple_13;
static PyObject *__pyx_k_tuple_14;
static PyObject *__pyx_k_tuple_16;
static PyObject *__pyx_k_tuple_19;
static PyObject *__pyx_k_tuple_22;
static PyObject *__pyx_k_tuple_24;
static PyObject *__pyx_k_tuple_26;
static PyObject *__pyx_k_tuple_28;
static PyObject *__pyx_k_tuple_31;
static PyObject *__pyx_k_tuple_33;
static PyObject *__pyx_k_codeobj_20;
static PyObject *__pyx_k_codeobj_23;
static PyObject *__pyx_k_codeobj_25;
static PyObject *__pyx_k_codeobj_27;
static PyObject *__pyx_k_codeobj_29;
static PyObject *__pyx_k_codeobj_32;
static PyObject *__pyx_k_codeobj_34;
/* "/home/wiecki/working/projects/hddm/src/pdf.pxi":25
* double M_PI
*
* cdef double ftt_01w(double tt, double w, double err) nogil: # <<<<<<<<<<<<<<
* """Compute f(t|0,1,w) for the likelihood of the drift diffusion model using the method
* and implementation of Navarro & Fuss, 2009.
*/
/* Compute f(tt|0,1,w): the first-passage-time density of a unit diffusion
 * with zero drift at normalized time tt and relative start point w, using
 * the truncated-series method of Navarro & Fuss (2009).  err is the
 * requested truncation error of the infinite series.
 * The routine picks whichever of the small-time or large-time expansion
 * needs fewer terms, then sums that series.
 * NOTE: arithmetic expressions are kept in the exact order of the original
 * generated code so results are bit-identical. */
static double __pyx_f_4wfpt_ftt_01w(double __pyx_v_tt, double __pyx_v_w, double __pyx_v_err) {
  const double tt = __pyx_v_tt;
  const double w = __pyx_v_w;
  const double err = __pyx_v_err;
  double kl;        /* terms needed by the large-time expansion */
  double ks;        /* terms needed by the small-time expansion */
  double density;
  int k, K, lower, upper;

  /* Number of terms needed for large tt. */
  if (((M_PI * tt) * err) < 1.0) {
    /* error threshold is low enough: use the analytic bound ... */
    kl = sqrt(((-2.0 * log(((M_PI * tt) * err))) / (pow(M_PI, 2.0) * tt)));
    /* ... but never fewer terms than the boundary condition requires */
    kl = fmax(kl, (1. / (M_PI * sqrt(tt))));
  } else {
    /* error threshold set too high: fall back to the boundary condition */
    kl = (1. / (M_PI * sqrt(tt)));
  }

  /* Number of terms needed for small tt. */
  if (((2.0 * sqrt(((2.0 * M_PI) * tt))) * err) < 1.0) {
    ks = (2.0 + sqrt(((-2.0 * tt) * log(((2.0 * sqrt(((2.0 * M_PI) * tt))) * err)))));
    ks = fmax(ks, (sqrt(tt) + 1.0));
  } else {
    ks = 2.0; /* minimal kappa when the threshold is too high */
  }

  density = 0.0;
  if (ks < kl) {
    /* Small-time expansion is cheaper (lambda < 0). */
    K = ((int)ceil(ks));
    lower = ((int)(-floor(((K - 1) / 2.))));
    upper = ((int)ceil(((K - 1) / 2.)));
    for (k = lower; k <= upper; k++) {
      density = (density + ((w + (2 * k)) * exp((((-pow((w + (2 * k)), 2.0)) / 2.0) / tt))));
    }
    density = (density / sqrt(((2.0 * M_PI) * pow(tt, 3.0)))); /* constant factor */
  } else {
    /* Large-time expansion. */
    K = ((int)ceil(kl));
    for (k = 1; k <= K; k++) {
      density = (density + ((k * exp(((((-pow(k, 2.0)) * pow(M_PI, 2.0)) * tt) / 2.0))) * sin(((k * M_PI) * w))));
    }
    density = (density * M_PI); /* constant factor */
  }

  return density;
}
/* "/home/wiecki/working/projects/hddm/src/pdf.pxi":64
* return p
*
* cdef inline double prob_ub(double v, double a, double z) nogil: # <<<<<<<<<<<<<<
* """Probability of hitting upper boundary."""
* return (exp(-2*a*z*v) - 1) / (exp(-2*a*v) - 1)
*/
/* Probability of the diffusion hitting the upper boundary, in closed form:
 * (exp(-2*a*z*v) - 1) / (exp(-2*a*v) - 1), with drift v, boundary
 * separation a and relative start point z. */
static CYTHON_INLINE double __pyx_f_4wfpt_prob_ub(double __pyx_v_v, double __pyx_v_a, double __pyx_v_z) {
  const double numer = (exp((((-2.0 * __pyx_v_a) * __pyx_v_z) * __pyx_v_v)) - 1.0);
  const double denom = (exp(((-2.0 * __pyx_v_a) * __pyx_v_v)) - 1.0);
  return numer / denom;
}
/* "/home/wiecki/working/projects/hddm/src/pdf.pxi":68
* return (exp(-2*a*z*v) - 1) / (exp(-2*a*v) - 1)
*
* cdef double pdf(double x, double v, double a, double w, double err) nogil: # <<<<<<<<<<<<<<
* """Compute the likelihood of the drift diffusion model f(t|v,a,z) using the method
* and implementation of Navarro & Fuss, 2009.
*/
/* f(t|v,a,w): likelihood of the drift diffusion model at time x with drift
 * v, boundary separation a, relative start point w and series truncation
 * error err (Navarro & Fuss, 2009).  Non-positive times have zero density. */
static double __pyx_f_4wfpt_pdf(double __pyx_v_x, double __pyx_v_v, double __pyx_v_a, double __pyx_v_w, double __pyx_v_err) {
  double tt;
  double f01;

  if (__pyx_v_x <= 0.0) {
    return 0.0;
  }

  /* Normalize time by the squared boundary separation. */
  tt = (__pyx_v_x / pow(__pyx_v_a, 2.0));

  /* Density of the normalized (driftless, unit-boundary) process. */
  f01 = __pyx_f_4wfpt_ftt_01w(tt, __pyx_v_w, __pyx_v_err);

  /* Rescale f(tt|0,1,w) back to f(t|v,a,w). */
  return ((f01 * exp(((((-__pyx_v_v) * __pyx_v_a) * __pyx_v_w) - ((pow(__pyx_v_v, 2.0) * __pyx_v_x) / 2.)))) / pow(__pyx_v_a, 2.0));
}
/* "/home/wiecki/working/projects/hddm/src/pdf.pxi":81
* return p*exp(-v*a*w -(pow(v,2))*x/2.)/(pow(a,2))
*
* cdef double pdf_sv(double x, double v, double sv, double a, double z, double err) nogil: # <<<<<<<<<<<<<<
* """Compute the likelihood of the drift diffusion model f(t|v,a,z,sv) using the method
* and implementation of Navarro & Fuss, 2009.
*/
/* f(t|v,a,z,sv): drift diffusion likelihood with normally distributed
 * inter-trial drift variability of std sv, integrated analytically
 * (Navarro & Fuss, 2009).  z is the relative start point; err is the
 * series truncation error.  Non-positive times have zero density. */
static double __pyx_f_4wfpt_pdf_sv(double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_err) {
  double tt;
  double f01;

  if (__pyx_v_x <= 0.0) {
    return 0.0;
  }

  /* With no drift variability this is the plain fixed-drift density. */
  if (__pyx_v_sv == 0.0) {
    return __pyx_f_4wfpt_pdf(__pyx_v_x, __pyx_v_v, __pyx_v_a, __pyx_v_z, __pyx_v_err);
  }

  /* Normalize time by the squared boundary separation. */
  tt = (__pyx_v_x / pow(__pyx_v_a, 2.0));
  f01 = __pyx_f_4wfpt_ftt_01w(tt, __pyx_v_z, __pyx_v_err);

  /* Closed-form marginalization over the normal drift distribution:
   * exp(log p + ((a*z*sv)^2 - 2*a*v*z - v^2*x) / (2*sv^2*x + 2))
   *   / sqrt(sv^2*x + 1) / a^2 */
  return ((exp((log(f01) + (((pow(((__pyx_v_a * __pyx_v_z) * __pyx_v_sv), 2.0) - (((2.0 * __pyx_v_a) * __pyx_v_v) * __pyx_v_z)) - (pow(__pyx_v_v, 2.0) * __pyx_v_x)) / (((2.0 * pow(__pyx_v_sv, 2.0)) * __pyx_v_x) + 2.0)))) / sqrt(((pow(__pyx_v_sv, 2.0) * __pyx_v_x) + 1.0))) / pow(__pyx_v_a, 2.0));
}
/* "/home/wiecki/working/projects/hddm/src/pdf.pxi":98
* return exp(log(p) + ((a*z*sv)**2 - 2*a*v*z - (v**2)*x)/(2*(sv**2)*x+2))/sqrt((sv**2)*x+1)/(a**2)
*
* cpdef double full_pdf(double x, double v, double sv, double a, double # <<<<<<<<<<<<<<
* z, double sz, double t, double st, double err, int
* n_st=2, int n_sz=2, bint use_adaptive=1, double
*/
static PyObject *__pyx_pw_4wfpt_1full_pdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* Full drift-diffusion pdf with inter-trial variability in drift (sv),
 * start point (sz) and non-decision time (st).
 *   x   : observed RT; sign encodes the boundary (x > 0 = upper).
 *   v,a : drift rate and boundary separation.
 *   z   : relative start point in [0,1]; t: non-decision time.
 *   err : truncation error for the series density.
 * Optional args (Cython calling convention, unpacked from the struct):
 *   n_st/n_sz : Simpson grid sizes; use_adaptive : adaptive vs fixed rule;
 *   simps_err : adaptive Simpson tolerance (default 1e-3).
 * Returns 0 for parameter combinations outside the valid region.
 * Dispatches to a direct evaluation or 1D/2D numerical integration over
 * st and/or sz, depending on which variabilities are non-zero. */
static double __pyx_f_4wfpt_full_pdf(double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_err, CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_4wfpt_full_pdf *__pyx_optional_args) {
  int n_st = ((int)2);
  int n_sz = ((int)2);
  int use_adaptive = ((int)1);
  double simps_err = ((double)1e-3);

  /* Unpack whichever optional arguments the caller supplied, in
   * declaration order (__pyx_n counts how many were passed). */
  if (__pyx_optional_args) {
    if (__pyx_optional_args->__pyx_n > 0) {
      n_st = __pyx_optional_args->n_st;
      if (__pyx_optional_args->__pyx_n > 1) {
        n_sz = __pyx_optional_args->n_sz;
        if (__pyx_optional_args->__pyx_n > 2) {
          use_adaptive = __pyx_optional_args->use_adaptive;
          if (__pyx_optional_args->__pyx_n > 3) {
            simps_err = __pyx_optional_args->simps_err;
          }
        }
      }
    }
  }

  /* Validity checks (short-circuit, same order as the original):
   * z in [0,1], a >= 0, RT not smaller than the earliest possible
   * non-decision time, sz interval inside [0,1], st interval positive. */
  if ((__pyx_v_z < 0.0) || (__pyx_v_z > 1.0) || (__pyx_v_a < 0.0) ||
      ((fabs(__pyx_v_x) - (__pyx_v_t - (__pyx_v_st / 2.))) < 0.0) ||
      ((__pyx_v_z + (__pyx_v_sz / 2.)) > 1.0) ||
      ((__pyx_v_z - (__pyx_v_sz / 2.)) < 0.0) ||
      ((__pyx_v_t - (__pyx_v_st / 2.)) < 0.0) ||
      (__pyx_v_t < 0.0)) {
    return 0.0;
  }

  /* Mirror an upper-bound response into the lower-bound parameterization. */
  if (__pyx_v_x > 0.0) {
    __pyx_v_v = (-__pyx_v_v);
    __pyx_v_z = (1. - __pyx_v_z);
  }
  __pyx_v_x = fabs(__pyx_v_x);

  /* Treat negligible variabilities as exactly zero. */
  if (__pyx_v_st < 1e-3) {
    __pyx_v_st = 0.0;
  }
  if (__pyx_v_sz < 1e-3) {
    __pyx_v_sz = 0.0;
  }

  if (__pyx_v_sz == 0.0) {
    if (__pyx_v_st == 0.0) {
      /* sz=0, st=0: no numerical integration needed. */
      return __pyx_f_4wfpt_pdf_sv((__pyx_v_x - __pyx_v_t), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_err);
    }
    /* sz=0, st>0: integrate over non-decision time only. */
    if (use_adaptive > 0) {
      return __pyx_f_4wfpt_adaptiveSimpsons_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_err, __pyx_v_z, __pyx_v_z, (__pyx_v_t - (__pyx_v_st / 2.)), (__pyx_v_t + (__pyx_v_st / 2.)), simps_err, n_st);
    }
    return __pyx_f_4wfpt_simpson_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_err, __pyx_v_z, __pyx_v_z, 0, (__pyx_v_t - (__pyx_v_st / 2.)), (__pyx_v_t + (__pyx_v_st / 2.)), n_st);
  }

  if (__pyx_v_st == 0.0) {
    /* sz>0, st=0: integrate over start point only. */
    if (use_adaptive) {
      return __pyx_f_4wfpt_adaptiveSimpsons_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_err, (__pyx_v_z - (__pyx_v_sz / 2.)), (__pyx_v_z + (__pyx_v_sz / 2.)), __pyx_v_t, __pyx_v_t, simps_err, n_sz);
    }
    return __pyx_f_4wfpt_simpson_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_err, (__pyx_v_z - (__pyx_v_sz / 2.)), (__pyx_v_z + (__pyx_v_sz / 2.)), n_sz, __pyx_v_t, __pyx_v_t, 0);
  }

  /* sz>0, st>0: integrate over both start point and non-decision time. */
  if (use_adaptive) {
    return __pyx_f_4wfpt_adaptiveSimpsons_2D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_err, (__pyx_v_z - (__pyx_v_sz / 2.)), (__pyx_v_z + (__pyx_v_sz / 2.)), (__pyx_v_t - (__pyx_v_st / 2.)), (__pyx_v_t + (__pyx_v_st / 2.)), simps_err, n_sz, n_st);
  }
  return __pyx_f_4wfpt_simpson_2D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_err, (__pyx_v_z - (__pyx_v_sz / 2.)), (__pyx_v_z + (__pyx_v_sz / 2.)), n_sz, (__pyx_v_t - (__pyx_v_st / 2.)), (__pyx_v_t + (__pyx_v_st / 2.)), n_st);
}
/* Python wrapper */
static PyObject *__pyx_pw_4wfpt_1full_pdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_4wfpt_full_pdf[] = "full_pdf(double x, double v, double sv, double a, double z, double sz, double t, double st, double err, int n_st=2, int n_sz=2, int use_adaptive=1, double simps_err=0.001) -> double\nfull pdf";
static PyObject *__pyx_pw_4wfpt_1full_pdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
double __pyx_v_x;
double __pyx_v_v;
double __pyx_v_sv;
double __pyx_v_a;
double __pyx_v_z;
double __pyx_v_sz;
double __pyx_v_t;
double __pyx_v_st;
double __pyx_v_err;
int __pyx_v_n_st;
int __pyx_v_n_sz;
int __pyx_v_use_adaptive;
double __pyx_v_simps_err;
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__v,&__pyx_n_s__sv,&__pyx_n_s__a,&__pyx_n_s__z,&__pyx_n_s__sz,&__pyx_n_s__t,&__pyx_n_s__st,&__pyx_n_s__err,&__pyx_n_s__n_st,&__pyx_n_s__n_sz,&__pyx_n_s__use_adaptive,&__pyx_n_s__simps_err,0};
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("full_pdf (wrapper)");
__pyx_self = __pyx_self;
{
PyObject* values[13] = {0,0,0,0,0,0,0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("full_pdf", 0, 9, 13, 1); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sv);
if (likely(values[2])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("full_pdf", 0, 9, 13, 2); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a);
if (likely(values[3])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("full_pdf", 0, 9, 13, 3); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 4:
values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z);
if (likely(values[4])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("full_pdf", 0, 9, 13, 4); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 5:
values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sz);
if (likely(values[5])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("full_pdf", 0, 9, 13, 5); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 6:
values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t);
if (likely(values[6])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("full_pdf", 0, 9, 13, 6); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 7:
values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__st);
if (likely(values[7])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("full_pdf", 0, 9, 13, 7); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 8:
values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__err);
if (likely(values[8])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("full_pdf", 0, 9, 13, 8); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 9:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_st);
if (value) { values[9] = value; kw_args--; }
}
case 10:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_sz);
if (value) { values[10] = value; kw_args--; }
}
case 11:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_adaptive);
if (value) { values[11] = value; kw_args--; }
}
case 12:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__simps_err);
if (value) { values[12] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "full_pdf") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
__pyx_v_x = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_x == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_v = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_err = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[9]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[9]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)2);
}
if (values[10]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)2);
}
if (values[11]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[11]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[12]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "/home/wiecki/working/projects/hddm/src/pdf.pxi":101
* z, double sz, double t, double st, double err, int
* n_st=2, int n_sz=2, bint use_adaptive=1, double
* simps_err=1e-3) nogil: # <<<<<<<<<<<<<<
* """full pdf"""
*
*/
__pyx_v_simps_err = ((double)1e-3);
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_x = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_x == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_v = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_err = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[9]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[9]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)2);
}
if (values[10]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)2);
}
if (values[11]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[11]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[12]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_simps_err = ((double)1e-3);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("full_pdf", 0, 9, 13, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("wfpt.full_pdf", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4wfpt_full_pdf(__pyx_self, __pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_err, __pyx_v_n_st, __pyx_v_n_sz, __pyx_v_use_adaptive, __pyx_v_simps_err);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "/home/wiecki/working/projects/hddm/src/pdf.pxi":98
* return exp(log(p) + ((a*z*sv)**2 - 2*a*v*z - (v**2)*x)/(2*(sv**2)*x+2))/sqrt((sv**2)*x+1)/(a**2)
*
* cpdef double full_pdf(double x, double v, double sv, double a, double # <<<<<<<<<<<<<<
* z, double sz, double t, double st, double err, int
* n_st=2, int n_sz=2, bint use_adaptive=1, double
*/
/* Python-callable wrapper for the cpdef function full_pdf().
 * Packs the four optional arguments (n_st, n_sz, use_adaptive, simps_err)
 * into the generated optional-args struct, invokes the nogil C
 * implementation __pyx_f_4wfpt_full_pdf(), and boxes the double result
 * as a Python float. Returns NULL with a Python exception set on failure. */
static PyObject *__pyx_pf_4wfpt_full_pdf(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_err, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
double __pyx_t_1; /* raw double result from the C-level function */
struct __pyx_opt_args_4wfpt_full_pdf __pyx_t_2; /* optional-argument pack */
PyObject *__pyx_t_3 = NULL; /* boxed (PyFloat) return value */
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("full_pdf");
__Pyx_XDECREF(__pyx_r);
/* __pyx_n = 4: all four optional arguments are explicitly supplied below. */
__pyx_t_2.__pyx_n = 4;
__pyx_t_2.n_st = __pyx_v_n_st;
__pyx_t_2.n_sz = __pyx_v_n_sz;
__pyx_t_2.use_adaptive = __pyx_v_use_adaptive;
__pyx_t_2.simps_err = __pyx_v_simps_err;
/* Call the nogil C implementation; 0 = skip-dispatch flag for cpdef. */
__pyx_t_1 = __pyx_f_4wfpt_full_pdf(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_err, 0, &__pyx_t_2);
__pyx_t_3 = PyFloat_FromDouble(__pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* Unreachable default-return path emitted by the Cython code generator. */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("wfpt.full_pdf", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":10
* include 'pdf.pxi'
*
* cdef double simpson_1D(double x, double v, double sv, double a, double z, double t, double err, # <<<<<<<<<<<<<<
* double lb_z, double ub_z, int n_sz, double lb_t, double ub_t, int n_st) nogil:
* #assert ((n_sz&1)==0 and (n_st&1)==0), "n_st and n_sz have to be even"
*/
/* Composite Simpson's rule over a single variable (integrate.pxi:10).
 *
 * Integrates pdf_sv either over z (when n_st == 0: interval [lb_z, ub_z],
 * n_sz panels, t held fixed) or over t (otherwise: interval [lb_t, ub_t],
 * n_st panels, z held fixed). The running sum applies the 4/2 Simpson
 * weights; the final term is corrected from 2*f(b) down to f(b) by
 * subtracting the last sample once after the loop. The sum is then
 * divided by the window width (the intended integrand is pdf_sv()/sz or
 * pdf_sv()/st).
 *
 * NOTE(review): if max(n_st, n_sz) < 1 the loop never runs and the
 * post-loop "- last" reads an uninitialized value — preexisting behavior
 * of the generated code, preserved as-is.
 */
static double __pyx_f_4wfpt_simpson_1D(double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_t, double __pyx_v_err, double __pyx_v_lb_z, double __pyx_v_ub_z, int __pyx_v_n_sz, double __pyx_v_lb_t, double __pyx_v_ub_t, int __pyx_v_n_st) {
    double step_t, step_z;
    double acc;
    double last;
    int i;
    /* n = max(n_st, n_sz) */
    int n = (__pyx_v_n_sz > __pyx_v_n_st) ? __pyx_v_n_sz : __pyx_v_n_st;

    if (__pyx_v_n_st == 0) {
        /* integration over z: collapse the t-interval onto t */
        step_z = (__pyx_v_ub_z - __pyx_v_lb_z) / n;
        step_t = 0.0;
        __pyx_v_lb_t = __pyx_v_t;
        __pyx_v_ub_t = __pyx_v_t;
    } else {
        /* integration over t: collapse the z-interval onto z */
        step_z = 0.0;
        step_t = (__pyx_v_ub_t - __pyx_v_lb_t) / n;
        __pyx_v_lb_z = __pyx_v_z;
        __pyx_v_ub_z = __pyx_v_z;
    }

    /* f(a): left endpoint of the Simpson sum */
    acc = __pyx_f_4wfpt_pdf_sv((__pyx_v_x - __pyx_v_lb_t), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_lb_z, __pyx_v_err);

    for (i = 1; i <= n; i++) {
        double z_i = __pyx_v_lb_z + (step_z * i);
        double t_i = __pyx_v_lb_t + (step_t * i);
        last = __pyx_f_4wfpt_pdf_sv((__pyx_v_x - t_i), __pyx_v_v, __pyx_v_sv, __pyx_v_a, z_i, __pyx_v_err);
        /* odd sample points weight 4, even interior points weight 2 */
        acc += (i & 1) ? (4.0 * last) : (2.0 * last);
    }

    acc -= last; /* the last term should be f(b), not 2*f(b) */
    /* normalize: the right function is pdf_sv()/sz or pdf_sv()/st */
    acc /= ((__pyx_v_ub_t - __pyx_v_lb_t) + (__pyx_v_ub_z - __pyx_v_lb_z));

    return ((step_t + step_z) * acc) / 3.0;
}
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":45
* return ((ht+hz) * S / 3)
*
* cdef double simpson_2D(double x, double v, double sv, double a, double z, double t, double err, double lb_z, double ub_z, int n_sz, double lb_t, double ub_t, int n_st) nogil: # <<<<<<<<<<<<<<
* #assert ((n_sz&1)==0 and (n_st&1)==0), "n_st and n_sz have to be even"
* #assert ((ub_t-lb_t)*(ub_z-lb_z)>0 and (n_sz*n_st)>0), "the function is defined for 2D-integration only, lb_t: %f, ub_t %f, lb_z %f, ub_z %f, n_sz: %d, n_st %d" % (lb_t, ub_t, lb_z, ub_z, n_sz, n_st)
*/
/* 2D composite Simpson integration (integrate.pxi:45): an outer Simpson
 * sum over t whose integrand is itself a 1D Simpson integral over z
 * (simpson_1D called with n_st=0 so it integrates over [lb_z, ub_z]).
 * The 4/2 Simpson weights are applied along t; the final sample is
 * corrected from 2*f(b) to f(b), and the sum is normalized by the st
 * window width.
 *
 * NOTE(review): if n_st < 1 the loop never runs and the post-loop
 * "- last" reads an uninitialized value — preexisting behavior of the
 * generated code, preserved as-is.
 */
static double __pyx_f_4wfpt_simpson_2D(double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, CYTHON_UNUSED double __pyx_v_t, double __pyx_v_err, double __pyx_v_lb_z, double __pyx_v_ub_z, int __pyx_v_n_sz, double __pyx_v_lb_t, double __pyx_v_ub_t, int __pyx_v_n_st) {
    double acc;
    double last;
    int k;
    /* panel width along t */
    double dt = (__pyx_v_ub_t - __pyx_v_lb_t) / __pyx_v_n_st;

    /* f(a): inner z-integral evaluated at the lower t bound */
    acc = __pyx_f_4wfpt_simpson_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_lb_t, __pyx_v_err, __pyx_v_lb_z, __pyx_v_ub_z, __pyx_v_n_sz, 0.0, 0.0, 0);

    for (k = 1; k <= __pyx_v_n_st; k++) {
        double t_k = __pyx_v_lb_t + (dt * k);
        last = __pyx_f_4wfpt_simpson_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, t_k, __pyx_v_err, __pyx_v_lb_z, __pyx_v_ub_z, __pyx_v_n_sz, 0.0, 0.0, 0);
        /* odd sample points weight 4, even interior points weight 2 */
        acc += (k & 1) ? (4.0 * last) : (2.0 * last);
    }

    acc -= last;                          /* last term should be f(b), not 2*f(b) */
    acc /= (__pyx_v_ub_t - __pyx_v_lb_t); /* normalize by the st window */

    return (dt * acc) / 3.0;
}
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":70
* return (ht * S / 3)
*
* cdef double adaptiveSimpsonsAux(double x, double v, double sv, double a, double z, double t, double pdf_err, # <<<<<<<<<<<<<<
* double lb_z, double ub_z, double lb_t, double ub_t, double ZT, double simps_err,
* double S, double f_beg, double f_end, double f_mid, int bottom) nogil:
*/
static double __pyx_f_4wfpt_adaptiveSimpsonsAux(double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_t, double __pyx_v_pdf_err, double __pyx_v_lb_z, double __pyx_v_ub_z, double __pyx_v_lb_t, double __pyx_v_ub_t, double __pyx_v_ZT, double __pyx_v_simps_err, double __pyx_v_S, double __pyx_v_f_beg, double __pyx_v_f_end, double __pyx_v_f_mid, int __pyx_v_bottom) {
double __pyx_v_z_c;
double __pyx_v_z_d;
double __pyx_v_z_e;
double __pyx_v_t_c;
double __pyx_v_t_d;
double __pyx_v_t_e;
double __pyx_v_h;
double __pyx_v_fd;
double __pyx_v_fe;
double __pyx_v_Sleft;
double __pyx_v_Sright;
double __pyx_v_S2;
double __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":79
* #print "in AdaptiveSimpsAux: lb_z: %f, ub_z: %f, lb_t %f, ub_t %f, f_beg: %f, f_end: %f, bottom: %d" % (lb_z, ub_z, lb_t, ub_t, f_beg, f_end, bottom)
*
* if (ub_t-lb_t) == 0: #integration over sz # <<<<<<<<<<<<<<
* h = ub_z - lb_z
* z_c = (ub_z + lb_z)/2.
*/
__pyx_t_1 = ((__pyx_v_ub_t - __pyx_v_lb_t) == 0.0);
if (__pyx_t_1) {
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":80
*
* if (ub_t-lb_t) == 0: #integration over sz
* h = ub_z - lb_z # <<<<<<<<<<<<<<
* z_c = (ub_z + lb_z)/2.
* z_d = (lb_z + z_c)/2.
*/
__pyx_v_h = (__pyx_v_ub_z - __pyx_v_lb_z);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":81
* if (ub_t-lb_t) == 0: #integration over sz
* h = ub_z - lb_z
* z_c = (ub_z + lb_z)/2. # <<<<<<<<<<<<<<
* z_d = (lb_z + z_c)/2.
* z_e = (z_c + ub_z)/2.
*/
__pyx_v_z_c = ((__pyx_v_ub_z + __pyx_v_lb_z) / 2.);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":82
* h = ub_z - lb_z
* z_c = (ub_z + lb_z)/2.
* z_d = (lb_z + z_c)/2. # <<<<<<<<<<<<<<
* z_e = (z_c + ub_z)/2.
* t_c = t
*/
__pyx_v_z_d = ((__pyx_v_lb_z + __pyx_v_z_c) / 2.);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":83
* z_c = (ub_z + lb_z)/2.
* z_d = (lb_z + z_c)/2.
* z_e = (z_c + ub_z)/2. # <<<<<<<<<<<<<<
* t_c = t
* t_d = t
*/
__pyx_v_z_e = ((__pyx_v_z_c + __pyx_v_ub_z) / 2.);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":84
* z_d = (lb_z + z_c)/2.
* z_e = (z_c + ub_z)/2.
* t_c = t # <<<<<<<<<<<<<<
* t_d = t
* t_e = t
*/
__pyx_v_t_c = __pyx_v_t;
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":85
* z_e = (z_c + ub_z)/2.
* t_c = t
* t_d = t # <<<<<<<<<<<<<<
* t_e = t
*
*/
__pyx_v_t_d = __pyx_v_t;
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":86
* t_c = t
* t_d = t
* t_e = t # <<<<<<<<<<<<<<
*
* else: #integration over t
*/
__pyx_v_t_e = __pyx_v_t;
goto __pyx_L3;
}
/*else*/ {
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":89
*
* else: #integration over t
* h = ub_t - lb_t # <<<<<<<<<<<<<<
* t_c = (ub_t + lb_t)/2.
* t_d = (lb_t + t_c)/2.
*/
__pyx_v_h = (__pyx_v_ub_t - __pyx_v_lb_t);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":90
* else: #integration over t
* h = ub_t - lb_t
* t_c = (ub_t + lb_t)/2. # <<<<<<<<<<<<<<
* t_d = (lb_t + t_c)/2.
* t_e = (t_c + ub_t)/2.
*/
__pyx_v_t_c = ((__pyx_v_ub_t + __pyx_v_lb_t) / 2.);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":91
* h = ub_t - lb_t
* t_c = (ub_t + lb_t)/2.
* t_d = (lb_t + t_c)/2. # <<<<<<<<<<<<<<
* t_e = (t_c + ub_t)/2.
* z_c = z
*/
__pyx_v_t_d = ((__pyx_v_lb_t + __pyx_v_t_c) / 2.);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":92
* t_c = (ub_t + lb_t)/2.
* t_d = (lb_t + t_c)/2.
* t_e = (t_c + ub_t)/2. # <<<<<<<<<<<<<<
* z_c = z
* z_d = z
*/
__pyx_v_t_e = ((__pyx_v_t_c + __pyx_v_ub_t) / 2.);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":93
* t_d = (lb_t + t_c)/2.
* t_e = (t_c + ub_t)/2.
* z_c = z # <<<<<<<<<<<<<<
* z_d = z
* z_e = z
*/
__pyx_v_z_c = __pyx_v_z;
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":94
* t_e = (t_c + ub_t)/2.
* z_c = z
* z_d = z # <<<<<<<<<<<<<<
* z_e = z
*
*/
__pyx_v_z_d = __pyx_v_z;
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":95
* z_c = z
* z_d = z
* z_e = z # <<<<<<<<<<<<<<
*
* fd = pdf_sv(x - t_d, v, sv, a, z_d, pdf_err)/ZT
*/
__pyx_v_z_e = __pyx_v_z;
}
__pyx_L3:;
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":97
* z_e = z
*
* fd = pdf_sv(x - t_d, v, sv, a, z_d, pdf_err)/ZT # <<<<<<<<<<<<<<
* fe = pdf_sv(x - t_e, v, sv, a, z_e, pdf_err)/ZT
*
*/
__pyx_v_fd = (__pyx_f_4wfpt_pdf_sv((__pyx_v_x - __pyx_v_t_d), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z_d, __pyx_v_pdf_err) / __pyx_v_ZT);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":98
*
* fd = pdf_sv(x - t_d, v, sv, a, z_d, pdf_err)/ZT
* fe = pdf_sv(x - t_e, v, sv, a, z_e, pdf_err)/ZT # <<<<<<<<<<<<<<
*
* Sleft = (h/12)*(f_beg + 4*fd + f_mid)
*/
__pyx_v_fe = (__pyx_f_4wfpt_pdf_sv((__pyx_v_x - __pyx_v_t_e), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z_e, __pyx_v_pdf_err) / __pyx_v_ZT);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":100
* fe = pdf_sv(x - t_e, v, sv, a, z_e, pdf_err)/ZT
*
* Sleft = (h/12)*(f_beg + 4*fd + f_mid) # <<<<<<<<<<<<<<
* Sright = (h/12)*(f_mid + 4*fe + f_end)
* S2 = Sleft + Sright
*/
__pyx_v_Sleft = ((__pyx_v_h / 12.0) * ((__pyx_v_f_beg + (4.0 * __pyx_v_fd)) + __pyx_v_f_mid));
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":101
*
* Sleft = (h/12)*(f_beg + 4*fd + f_mid)
* Sright = (h/12)*(f_mid + 4*fe + f_end) # <<<<<<<<<<<<<<
* S2 = Sleft + Sright
* if (bottom <= 0 or fabs(S2 - S) <= 15*simps_err):
*/
__pyx_v_Sright = ((__pyx_v_h / 12.0) * ((__pyx_v_f_mid + (4.0 * __pyx_v_fe)) + __pyx_v_f_end));
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":102
* Sleft = (h/12)*(f_beg + 4*fd + f_mid)
* Sright = (h/12)*(f_mid + 4*fe + f_end)
* S2 = Sleft + Sright # <<<<<<<<<<<<<<
* if (bottom <= 0 or fabs(S2 - S) <= 15*simps_err):
* return S2 + (S2 - S)/15
*/
__pyx_v_S2 = (__pyx_v_Sleft + __pyx_v_Sright);
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":103
* Sright = (h/12)*(f_mid + 4*fe + f_end)
* S2 = Sleft + Sright
* if (bottom <= 0 or fabs(S2 - S) <= 15*simps_err): # <<<<<<<<<<<<<<
* return S2 + (S2 - S)/15
* return adaptiveSimpsonsAux(x, v, sv, a, z, t, pdf_err,
*/
__pyx_t_1 = (__pyx_v_bottom <= 0);
if (!__pyx_t_1) {
__pyx_t_2 = (fabs((__pyx_v_S2 - __pyx_v_S)) <= (15.0 * __pyx_v_simps_err));
__pyx_t_3 = __pyx_t_2;
} else {
__pyx_t_3 = __pyx_t_1;
}
if (__pyx_t_3) {
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":104
* S2 = Sleft + Sright
* if (bottom <= 0 or fabs(S2 - S) <= 15*simps_err):
* return S2 + (S2 - S)/15 # <<<<<<<<<<<<<<
* return adaptiveSimpsonsAux(x, v, sv, a, z, t, pdf_err,
* lb_z, z_c, lb_t, t_c, ZT, simps_err/2,
*/
__pyx_r = (__pyx_v_S2 + ((__pyx_v_S2 - __pyx_v_S) / 15.0));
goto __pyx_L0;
goto __pyx_L4;
}
__pyx_L4:;
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":110
* adaptiveSimpsonsAux(x, v, sv, a, z, t, pdf_err,
* z_c, ub_z, t_c, ub_t, ZT, simps_err/2,
* Sright, f_mid, f_end, fe, bottom-1) # <<<<<<<<<<<<<<
*
* cdef double adaptiveSimpsons_1D(double x, double v, double sv, double a, double z, double t,
*/
__pyx_r = (__pyx_f_4wfpt_adaptiveSimpsonsAux(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_pdf_err, __pyx_v_lb_z, __pyx_v_z_c, __pyx_v_lb_t, __pyx_v_t_c, __pyx_v_ZT, (__pyx_v_simps_err / 2.0), __pyx_v_Sleft, __pyx_v_f_beg, __pyx_v_f_mid, __pyx_v_fd, (__pyx_v_bottom - 1)) + __pyx_f_4wfpt_adaptiveSimpsonsAux(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_pdf_err, __pyx_v_z_c, __pyx_v_ub_z, __pyx_v_t_c, __pyx_v_ub_t, __pyx_v_ZT, (__pyx_v_simps_err / 2.0), __pyx_v_Sright, __pyx_v_f_mid, __pyx_v_f_end, __pyx_v_fe, (__pyx_v_bottom - 1)));
goto __pyx_L0;
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":112
* Sright, f_mid, f_end, fe, bottom-1)
*
* cdef double adaptiveSimpsons_1D(double x, double v, double sv, double a, double z, double t, # <<<<<<<<<<<<<<
* double pdf_err, double lb_z, double ub_z, double lb_t, double ub_t,
* double simps_err, int maxRecursionDepth) nogil:
*/
/* Entry point for 1D adaptive Simpson integration of pdf_sv()/ZT
 * (integrate.pxi:112). Integrates over z when the t-interval is
 * degenerate (ub_t == lb_t), over t otherwise; ZT is the width of the
 * active window, so the result is the mean of pdf_sv over it. Seeds the
 * recursion with the whole-interval Simpson estimate and delegates
 * refinement to adaptiveSimpsonsAux, bounded by maxRecursionDepth. */
static double __pyx_f_4wfpt_adaptiveSimpsons_1D(double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_t, double __pyx_v_pdf_err, double __pyx_v_lb_z, double __pyx_v_ub_z, double __pyx_v_lb_t, double __pyx_v_ub_t, double __pyx_v_simps_err, int __pyx_v_maxRecursionDepth) {
    double h;
    double width_norm;
    double mid_t, mid_z;
    double f_a, f_b, f_c;
    double seed;

    if ((__pyx_v_ub_t - __pyx_v_lb_t) == 0.0) {
        /* integration over z: pin the t bounds to t */
        __pyx_v_lb_t = __pyx_v_t;
        __pyx_v_ub_t = __pyx_v_t;
        h = __pyx_v_ub_z - __pyx_v_lb_z;
    } else {
        /* integration over t: pin the z bounds to z */
        h = __pyx_v_ub_t - __pyx_v_lb_t;
        __pyx_v_lb_z = __pyx_v_z;
        __pyx_v_ub_z = __pyx_v_z;
    }

    width_norm = h; /* ZT: every sample is divided by the window width */
    mid_t = (__pyx_v_lb_t + __pyx_v_ub_t) / 2.;
    mid_z = (__pyx_v_lb_z + __pyx_v_ub_z) / 2.;

    /* integrand at the two endpoints and the midpoint */
    f_a = __pyx_f_4wfpt_pdf_sv((__pyx_v_x - __pyx_v_lb_t), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_lb_z, __pyx_v_pdf_err) / width_norm;
    f_b = __pyx_f_4wfpt_pdf_sv((__pyx_v_x - __pyx_v_ub_t), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_ub_z, __pyx_v_pdf_err) / width_norm;
    f_c = __pyx_f_4wfpt_pdf_sv((__pyx_v_x - mid_t), __pyx_v_v, __pyx_v_sv, __pyx_v_a, mid_z, __pyx_v_pdf_err) / width_norm;

    /* whole-interval Simpson estimate used as the recursion seed */
    seed = (h / 6.0) * ((f_a + (4.0 * f_c)) + f_b);

    return __pyx_f_4wfpt_adaptiveSimpsonsAux(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_pdf_err,
               __pyx_v_lb_z, __pyx_v_ub_z, __pyx_v_lb_t, __pyx_v_ub_t, width_norm, __pyx_v_simps_err,
               seed, f_a, f_b, f_c, __pyx_v_maxRecursionDepth);
}
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":141
* return res
*
* cdef double adaptiveSimpsonsAux_2D(double x, double v, double sv, # <<<<<<<<<<<<<<
* double a, double z, double t, double
* pdf_err, double err_1d, double lb_z,
*/
/* C backend for cdef adaptiveSimpsonsAux_2D (integrate.pxi:141): one
 * recursion step of adaptive Simpson quadrature over the outer t interval
 * [lb_t, ub_t] of a 2-D integral.  Each integrand sample is itself a 1-D
 * adaptive Simpson integral over z (adaptiveSimpsons_1D), normalised by the
 * t-window width st.  S is the coarse Simpson estimate inherited from the
 * caller, f_beg/f_mid/f_end the inherited endpoint/midpoint samples, and
 * bottom the remaining recursion budget.  Returns the refined estimate. */
static double __pyx_f_4wfpt_adaptiveSimpsonsAux_2D(double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_t, double __pyx_v_pdf_err, double __pyx_v_err_1d, double __pyx_v_lb_z, double __pyx_v_ub_z, double __pyx_v_lb_t, double __pyx_v_ub_t, double __pyx_v_st, double __pyx_v_err_2d, double __pyx_v_S, double __pyx_v_f_beg, double __pyx_v_f_end, double __pyx_v_f_mid, int __pyx_v_maxRecursionDepth_sz, int __pyx_v_bottom) {
/* Midpoint of the interval and midpoints of its two halves. */
double t_c = (__pyx_v_ub_t + __pyx_v_lb_t) / 2.0;
double t_d = (__pyx_v_lb_t + t_c) / 2.0;
double t_e = (t_c + __pyx_v_ub_t) / 2.0;
double h = __pyx_v_ub_t - __pyx_v_lb_t;
/* Integrand at the quarter points: inner 1-D z-integral, scaled by st. */
double fd = __pyx_f_4wfpt_adaptiveSimpsons_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, t_d, __pyx_v_pdf_err, __pyx_v_lb_z, __pyx_v_ub_z, 0.0, 0.0, __pyx_v_err_1d, __pyx_v_maxRecursionDepth_sz) / __pyx_v_st;
double fe = __pyx_f_4wfpt_adaptiveSimpsons_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, t_e, __pyx_v_pdf_err, __pyx_v_lb_z, __pyx_v_ub_z, 0.0, 0.0, __pyx_v_err_1d, __pyx_v_maxRecursionDepth_sz) / __pyx_v_st;
/* Simpson estimates on the two half-intervals and their sum. */
double Sleft = (h / 12.0) * (__pyx_v_f_beg + 4.0 * fd + __pyx_v_f_mid);
double Sright = (h / 12.0) * (__pyx_v_f_mid + 4.0 * fe + __pyx_v_f_end);
double S2 = Sleft + Sright;
/* Accept once the depth budget is spent or the refined estimate agrees
 * with the coarse one within 15*err_2d; (S2 - S)/15 is the standard
 * Richardson extrapolation correction for adaptive Simpson. */
if (__pyx_v_bottom <= 0 || fabs(S2 - __pyx_v_S) <= 15.0 * __pyx_v_err_2d) {
return S2 + (S2 - __pyx_v_S) / 15.0;
}
/* Otherwise recurse on both halves with half the error budget each. */
return __pyx_f_4wfpt_adaptiveSimpsonsAux_2D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_pdf_err, __pyx_v_err_1d, __pyx_v_lb_z, __pyx_v_ub_z, __pyx_v_lb_t, t_c, __pyx_v_st, __pyx_v_err_2d / 2.0, Sleft, __pyx_v_f_beg, __pyx_v_f_mid, fd, __pyx_v_maxRecursionDepth_sz, __pyx_v_bottom - 1) +
__pyx_f_4wfpt_adaptiveSimpsonsAux_2D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_pdf_err, __pyx_v_err_1d, __pyx_v_lb_z, __pyx_v_ub_z, t_c, __pyx_v_ub_t, __pyx_v_st, __pyx_v_err_2d / 2.0, Sright, __pyx_v_f_mid, __pyx_v_f_end, fe, __pyx_v_maxRecursionDepth_sz, __pyx_v_bottom - 1);
}
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":179
*
*
* cdef double adaptiveSimpsons_2D(double x, double v, double sv, double a, double z, double t, # <<<<<<<<<<<<<<
* double pdf_err, double lb_z, double ub_z, double lb_t, double ub_t,
* double simps_err, int maxRecursionDepth_sz, int maxRecursionDepth_st) nogil:
*/
/* C backend for cdef adaptiveSimpsons_2D (integrate.pxi:179): adaptively
 * integrates the density over z in [lb_z, ub_z] (inner 1-D adaptive
 * Simpson) and t in [lb_t, ub_t] (outer level, refined recursively by
 * adaptiveSimpsonsAux_2D).  simps_err is used as the tolerance at both
 * levels, and every inner integral is divided by the t-window width st.
 * (The generated c_t/c_z midpoint locals were CYTHON_UNUSED dead code and
 * are omitted here; they had no effect on the result.) */
static double __pyx_f_4wfpt_adaptiveSimpsons_2D(double __pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_t, double __pyx_v_pdf_err, double __pyx_v_lb_z, double __pyx_v_ub_z, double __pyx_v_lb_t, double __pyx_v_ub_t, double __pyx_v_simps_err, int __pyx_v_maxRecursionDepth_sz, int __pyx_v_maxRecursionDepth_st) {
double h = __pyx_v_ub_t - __pyx_v_lb_t;   /* outer interval width */
double st = __pyx_v_ub_t - __pyx_v_lb_t;  /* normaliser for the inner integrals */
double err_1d = __pyx_v_simps_err;        /* tolerance of inner z-integrals */
double err_2d = __pyx_v_simps_err;        /* tolerance of outer t refinement */
/* Inner z-integrals at the endpoints and midpoint of [lb_t, ub_t]. */
double f_beg = __pyx_f_4wfpt_adaptiveSimpsons_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_lb_t, __pyx_v_pdf_err, __pyx_v_lb_z, __pyx_v_ub_z, 0.0, 0.0, err_1d, __pyx_v_maxRecursionDepth_sz) / st;
double f_end = __pyx_f_4wfpt_adaptiveSimpsons_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_ub_t, __pyx_v_pdf_err, __pyx_v_lb_z, __pyx_v_ub_z, 0.0, 0.0, err_1d, __pyx_v_maxRecursionDepth_sz) / st;
double f_mid = __pyx_f_4wfpt_adaptiveSimpsons_1D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, (__pyx_v_lb_t + __pyx_v_ub_t) / 2.0, __pyx_v_pdf_err, __pyx_v_lb_z, __pyx_v_ub_z, 0.0, 0.0, err_1d, __pyx_v_maxRecursionDepth_sz) / st;
/* Coarse Simpson estimate, then hand off to the recursive refiner. */
double S = (h / 6.0) * (f_beg + 4.0 * f_mid + f_end);
return __pyx_f_4wfpt_adaptiveSimpsonsAux_2D(__pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_t, __pyx_v_pdf_err, err_1d, __pyx_v_lb_z, __pyx_v_ub_z, __pyx_v_lb_t, __pyx_v_ub_t, st, err_2d, S, f_beg, f_end, f_mid, __pyx_v_maxRecursionDepth_sz, __pyx_v_maxRecursionDepth_st);
}
/* Python wrapper */
static PyObject *__pyx_pw_4wfpt_3pdf_array(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_4wfpt_2pdf_array[] = "pdf_array(ndarray x, double v, double sv, double a, double z, double sz, double t, double st, double err=0.0001, int logp=0, int n_st=2, int n_sz=2, int use_adaptive=1, double simps_err=0.001, double p_outlier=0, double w_outlier=0)";
static PyMethodDef __pyx_mdef_4wfpt_3pdf_array = {__Pyx_NAMESTR("pdf_array"), (PyCFunction)__pyx_pw_4wfpt_3pdf_array, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4wfpt_2pdf_array)};
/* CPython entry point (METH_VARARGS|METH_KEYWORDS wrapper) for
 * wfpt.pdf_array: unpacks the 8 required and 8 optional arguments from the
 * positional tuple and/or keyword dict, converts them to C types, applies
 * the Cython-declared defaults (err=1e-4, logp=0, n_st=2, n_sz=2,
 * use_adaptive=1, simps_err=1e-3, p_outlier=0, w_outlier=0), type-checks x
 * as an ndarray, and dispatches to __pyx_pf_4wfpt_2pdf_array.  On any
 * parse/conversion failure it records a traceback and returns NULL.
 * NOTE: generated by Cython — do not edit by hand. */
static PyObject *__pyx_pw_4wfpt_3pdf_array(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_x = 0;
double __pyx_v_v;
double __pyx_v_sv;
double __pyx_v_a;
double __pyx_v_z;
double __pyx_v_sz;
double __pyx_v_t;
double __pyx_v_st;
double __pyx_v_err;
int __pyx_v_logp;
int __pyx_v_n_st;
int __pyx_v_n_sz;
int __pyx_v_use_adaptive;
double __pyx_v_simps_err;
double __pyx_v_p_outlier;
double __pyx_v_w_outlier;
/* Interned argument-name objects, in declaration order; NULL-terminated. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__v,&__pyx_n_s__sv,&__pyx_n_s__a,&__pyx_n_s__z,&__pyx_n_s__sz,&__pyx_n_s__t,&__pyx_n_s__st,&__pyx_n_s__err,&__pyx_n_s__logp,&__pyx_n_s__n_st,&__pyx_n_s__n_sz,&__pyx_n_s__use_adaptive,&__pyx_n_s__simps_err,&__pyx_n_s__p_outlier,&__pyx_n_s__w_outlier,0};
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("pdf_array (wrapper)");
/* Self-assignment silences an "unused parameter" warning. */
__pyx_self = __pyx_self;
{
PyObject* values[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* Slow path: keyword arguments were supplied. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Grab whatever arrived positionally; cases fall through on purpose. */
switch (pos_args) {
case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill remaining slots from keywords; the first 8 (x..st) are required
 * and raise if missing, slots 8-15 are optional.  Fallthrough resumes
 * at the first slot not already filled positionally. */
switch (pos_args) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("pdf_array", 0, 8, 16, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sv);
if (likely(values[2])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("pdf_array", 0, 8, 16, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a);
if (likely(values[3])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("pdf_array", 0, 8, 16, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 4:
values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z);
if (likely(values[4])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("pdf_array", 0, 8, 16, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 5:
values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sz);
if (likely(values[5])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("pdf_array", 0, 8, 16, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 6:
values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t);
if (likely(values[6])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("pdf_array", 0, 8, 16, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 7:
values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__st);
if (likely(values[7])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("pdf_array", 0, 8, 16, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 8:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__err);
if (value) { values[8] = value; kw_args--; }
}
case 9:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__logp);
if (value) { values[9] = value; kw_args--; }
}
case 10:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_st);
if (value) { values[10] = value; kw_args--; }
}
case 11:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_sz);
if (value) { values[11] = value; kw_args--; }
}
case 12:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_adaptive);
if (value) { values[12] = value; kw_args--; }
}
case 13:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__simps_err);
if (value) { values[13] = value; kw_args--; }
}
case 14:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__p_outlier);
if (value) { values[14] = value; kw_args--; }
}
case 15:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__w_outlier);
if (value) { values[15] = value; kw_args--; }
}
}
/* Leftover keywords at this point are unexpected/duplicate names. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pdf_array") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
/* Convert collected objects to C values, applying defaults for
 * any optional slot left NULL. */
__pyx_v_x = ((PyArrayObject *)values[0]);
__pyx_v_v = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[8]) {
__pyx_v_err = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":31
 *
 * def pdf_array(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz,
 *     double t, double st, double err=1e-4, bint logp=0, int n_st=2, int n_sz=2, bint use_adaptive=1, # <<<<<<<<<<<<<<
 *     double simps_err=1e-3, double p_outlier=0, double w_outlier=0):
 *
 */
__pyx_v_err = ((double)1e-4);
}
if (values[9]) {
__pyx_v_logp = __Pyx_PyObject_IsTrue(values[9]); if (unlikely((__pyx_v_logp == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_logp = ((int)0);
}
if (values[10]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)2);
}
if (values[11]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[11]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)2);
}
if (values[12]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[12]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[13]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[13]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":32
 * def pdf_array(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz,
 *     double t, double st, double err=1e-4, bint logp=0, int n_st=2, int n_sz=2, bint use_adaptive=1,
 *     double simps_err=1e-3, double p_outlier=0, double w_outlier=0): # <<<<<<<<<<<<<<
 *
 *     cdef Py_ssize_t size = x.shape[0]
 */
__pyx_v_simps_err = ((double)1e-3);
}
if (values[14]) {
__pyx_v_p_outlier = __pyx_PyFloat_AsDouble(values[14]); if (unlikely((__pyx_v_p_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_p_outlier = ((double)0.0);
}
if (values[15]) {
__pyx_v_w_outlier = __pyx_PyFloat_AsDouble(values[15]); if (unlikely((__pyx_v_w_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_w_outlier = ((double)0.0);
}
} else {
/* Fast path: no keywords; accept 8..16 positional arguments
 * (fallthrough intentional). */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
/* Same conversion/default sequence as in the keyword branch; the
 * generator emits it once per branch. */
__pyx_v_x = ((PyArrayObject *)values[0]);
__pyx_v_v = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[8]) {
__pyx_v_err = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":31
 *
 * def pdf_array(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz,
 *     double t, double st, double err=1e-4, bint logp=0, int n_st=2, int n_sz=2, bint use_adaptive=1, # <<<<<<<<<<<<<<
 *     double simps_err=1e-3, double p_outlier=0, double w_outlier=0):
 *
 */
__pyx_v_err = ((double)1e-4);
}
if (values[9]) {
__pyx_v_logp = __Pyx_PyObject_IsTrue(values[9]); if (unlikely((__pyx_v_logp == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_logp = ((int)0);
}
if (values[10]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)2);
}
if (values[11]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[11]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)2);
}
if (values[12]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[12]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[13]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[13]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":32
 * def pdf_array(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz,
 *     double t, double st, double err=1e-4, bint logp=0, int n_st=2, int n_sz=2, bint use_adaptive=1,
 *     double simps_err=1e-3, double p_outlier=0, double w_outlier=0): # <<<<<<<<<<<<<<
 *
 *     cdef Py_ssize_t size = x.shape[0]
 */
__pyx_v_simps_err = ((double)1e-3);
}
if (values[14]) {
__pyx_v_p_outlier = __pyx_PyFloat_AsDouble(values[14]); if (unlikely((__pyx_v_p_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_p_outlier = ((double)0.0);
}
if (values[15]) {
__pyx_v_w_outlier = __pyx_PyFloat_AsDouble(values[15]); if (unlikely((__pyx_v_w_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_w_outlier = ((double)0.0);
}
}
goto __pyx_L4_argument_unpacking_done;
/* Wrong positional-argument count: raise TypeError via helper. */
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("pdf_array", 0, 8, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
/* Common unpacking-failure exit: attach traceback info, return NULL. */
__pyx_L3_error:;
__Pyx_AddTraceback("wfpt.pdf_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Verify x is a numpy ndarray (None not accepted), then call the
 * actual implementation. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = __pyx_pf_4wfpt_2pdf_array(__pyx_self, __pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_err, __pyx_v_logp, __pyx_v_n_st, __pyx_v_n_sz, __pyx_v_use_adaptive, __pyx_v_simps_err, __pyx_v_p_outlier, __pyx_v_w_outlier);
goto __pyx_L0;
__pyx_L1_error:;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "wfpt.pyx":30
* include 'integrate.pxi'
*
* def pdf_array(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, # <<<<<<<<<<<<<<
* double t, double st, double err=1e-4, bint logp=0, int n_st=2, int n_sz=2, bint use_adaptive=1,
* double simps_err=1e-3, double p_outlier=0, double w_outlier=0):
*/
static PyObject *__pyx_pf_4wfpt_2pdf_array(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_err, int __pyx_v_logp, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err, double __pyx_v_p_outlier, double __pyx_v_w_outlier) {
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_v_i;
PyArrayObject *__pyx_v_y = 0;
__Pyx_LocalBuf_ND __pyx_pybuffernd_y;
__Pyx_Buffer __pyx_pybuffer_y;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x;
__Pyx_Buffer __pyx_pybuffer_x;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyArrayObject *__pyx_t_6 = NULL;
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
double __pyx_t_11;
struct __pyx_opt_args_4wfpt_full_pdf __pyx_t_12;
Py_ssize_t __pyx_t_13;
int __pyx_t_14;
PyObject *__pyx_t_15 = NULL;
PyObject *__pyx_t_16 = NULL;
PyObject *__pyx_t_17 = NULL;
int __pyx_t_18;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pdf_array");
__pyx_pybuffer_y.pybuffer.buf = NULL;
__pyx_pybuffer_y.refcount = 0;
__pyx_pybuffernd_y.data = NULL;
__pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y;
__pyx_pybuffer_x.pybuffer.buf = NULL;
__pyx_pybuffer_x.refcount = 0;
__pyx_pybuffernd_x.data = NULL;
__pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
/* "wfpt.pyx":34
* double simps_err=1e-3, double p_outlier=0, double w_outlier=0):
*
* cdef Py_ssize_t size = x.shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t i
* cdef np.ndarray[double, ndim=1] y = np.empty(size, dtype=np.double)
*/
__pyx_v_size = (__pyx_v_x->dimensions[0]);
/* "wfpt.pyx":36
* cdef Py_ssize_t size = x.shape[0]
* cdef Py_ssize_t i
* cdef np.ndarray[double, ndim=1] y = np.empty(size, dtype=np.double) # <<<<<<<<<<<<<<
*
* for i in prange(size, nogil=True):
*/
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_1));
__pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__double); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), __pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_6 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_y = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_y.rcbuffer->pybuffer.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_6 = 0;
__pyx_v_y = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "wfpt.pyx":38
* cdef np.ndarray[double, ndim=1] y = np.empty(size, dtype=np.double)
*
* for i in prange(size, nogil=True): # <<<<<<<<<<<<<<
* y[i] = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
*
*/
{
#ifdef WITH_THREAD
PyThreadState *_save = NULL;
#endif
Py_UNBLOCK_THREADS
/*try:*/ {
__pyx_t_7 = __pyx_v_size;
if (1 == 0) abort();
{
__pyx_t_9 = (__pyx_t_7 - 0) / 1;
if (__pyx_t_9 > 0)
{
__pyx_v_i = 0;
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_13, __pyx_t_12, __pyx_t_10, __pyx_t_11)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = 0 + 1 * __pyx_t_8;
/* "wfpt.pyx":39
*
* for i in prange(size, nogil=True):
* y[i] = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err) # <<<<<<<<<<<<<<
*
* y = y * (1 - p_outlier) + (w_outlier * p_outlier)
*/
__pyx_t_10 = __pyx_v_i;
__pyx_t_12.__pyx_n = 4;
__pyx_t_12.n_st = __pyx_v_n_st;
__pyx_t_12.n_sz = __pyx_v_n_sz;
__pyx_t_12.use_adaptive = __pyx_v_use_adaptive;
__pyx_t_12.simps_err = __pyx_v_simps_err;
__pyx_t_11 = __pyx_f_4wfpt_full_pdf((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_x.diminfo[0].strides)), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_err, 0, &__pyx_t_12);
__pyx_t_13 = __pyx_v_i;
*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_y.diminfo[0].strides) = __pyx_t_11;
}
}
}
}
}
}
/* "wfpt.pyx":38
* cdef np.ndarray[double, ndim=1] y = np.empty(size, dtype=np.double)
*
* for i in prange(size, nogil=True): # <<<<<<<<<<<<<<
* y[i] = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
*
*/
/*finally:*/ {
Py_BLOCK_THREADS
}
}
/* "wfpt.pyx":41
* y[i] = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
*
* y = y * (1 - p_outlier) + (w_outlier * p_outlier) # <<<<<<<<<<<<<<
* if logp==1:
* return np.log(y)
*/
__pyx_t_5 = PyFloat_FromDouble((1.0 - __pyx_v_p_outlier)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyNumber_Multiply(((PyObject *)__pyx_v_y), __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyFloat_FromDouble((__pyx_v_w_outlier * __pyx_v_p_outlier)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_6 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__pyx_t_14 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_14 < 0)) {
PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17);
}
}
__pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_14 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_6 = 0;
__Pyx_DECREF(((PyObject *)__pyx_v_y));
__pyx_v_y = ((PyArrayObject *)__pyx_t_3);
__pyx_t_3 = 0;
/* "wfpt.pyx":42
*
* y = y * (1 - p_outlier) + (w_outlier * p_outlier)
* if logp==1: # <<<<<<<<<<<<<<
* return np.log(y)
* else:
*/
__pyx_t_18 = (__pyx_v_logp == 1);
if (__pyx_t_18) {
/* "wfpt.pyx":43
* y = y * (1 - p_outlier) + (w_outlier * p_outlier)
* if logp==1:
* return np.log(y) # <<<<<<<<<<<<<<
* else:
* return y
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__log); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_y));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_y));
__Pyx_GIVEREF(((PyObject *)__pyx_v_y));
__pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
goto __pyx_L12;
}
/*else*/ {
/* "wfpt.pyx":45
* return np.log(y)
* else:
* return y # <<<<<<<<<<<<<<
*
* cdef inline bint p_outlier_in_range(double p_outlier): return (p_outlier >= 0) & (p_outlier <= 1)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_y));
__pyx_r = ((PyObject *)__pyx_v_y);
goto __pyx_L0;
}
__pyx_L12:;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("wfpt.pdf_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_y);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "wfpt.pyx":47
* return y
*
* cdef inline bint p_outlier_in_range(double p_outlier): return (p_outlier >= 0) & (p_outlier <= 1) # <<<<<<<<<<<<<<
*
* def wiener_like(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, double t,
*/
/* Predicate: nonzero iff p_outlier lies in the closed interval [0, 1].
 * Backs the Cython inline `p_outlier_in_range` from wfpt.pyx:47; callers
 * use it to reject invalid outlier mixture probabilities before looping. */
static CYTHON_INLINE int __pyx_f_4wfpt_p_outlier_in_range(double __pyx_v_p_outlier) {
int in_range;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("p_outlier_in_range");
/* Both comparisons yield 0/1, so logical && matches the original
 * bitwise & of the two comparison results exactly. */
in_range = (__pyx_v_p_outlier >= 0.0) && (__pyx_v_p_outlier <= 1.0);
__Pyx_RefNannyFinishContext();
return in_range;
}
/* Python wrapper */
/* CPython entry point for wfpt.wiener_like: unpacks positional and keyword
 * arguments, applies the defaults from the pyx signature (n_st=10, n_sz=10,
 * use_adaptive=1, simps_err=1e-8, p_outlier=0, w_outlier=0), converts
 * everything to C types, type-checks `x` as a numpy ndarray, and dispatches
 * to the implementation __pyx_pf_4wfpt_4wiener_like.
 * NOTE(review): this is Cython-generated code — edit wfpt.pyx, not this file.
 * All switch statements below fall through intentionally (Cython's
 * count-down unpacking idiom). */
static PyObject *__pyx_pw_4wfpt_5wiener_like(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_4wfpt_4wiener_like[] = "wiener_like(ndarray x, double v, double sv, double a, double z, double sz, double t, double st, double err, int n_st=10, int n_sz=10, int use_adaptive=1, double simps_err=1e-08, double p_outlier=0, double w_outlier=0)";
static PyMethodDef __pyx_mdef_4wfpt_5wiener_like = {__Pyx_NAMESTR("wiener_like"), (PyCFunction)__pyx_pw_4wfpt_5wiener_like, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4wfpt_4wiener_like)};
static PyObject *__pyx_pw_4wfpt_5wiener_like(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
/* C-level destinations of the 15 Python arguments. */
PyArrayObject *__pyx_v_x = 0;
double __pyx_v_v;
double __pyx_v_sv;
double __pyx_v_a;
double __pyx_v_z;
double __pyx_v_sz;
double __pyx_v_t;
double __pyx_v_st;
double __pyx_v_err;
int __pyx_v_n_st;
int __pyx_v_n_sz;
int __pyx_v_use_adaptive;
double __pyx_v_simps_err;
double __pyx_v_p_outlier;
double __pyx_v_w_outlier;
/* Keyword-name interned strings, in declaration order of the signature. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__v,&__pyx_n_s__sv,&__pyx_n_s__a,&__pyx_n_s__z,&__pyx_n_s__sz,&__pyx_n_s__t,&__pyx_n_s__st,&__pyx_n_s__err,&__pyx_n_s__n_st,&__pyx_n_s__n_sz,&__pyx_n_s__use_adaptive,&__pyx_n_s__simps_err,&__pyx_n_s__p_outlier,&__pyx_n_s__w_outlier,0};
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("wiener_like (wrapper)");
__pyx_self = __pyx_self;
{
PyObject* values[15] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* Path 1: keyword arguments present — collect positionals, then fill the
 * rest from the kwds dict (arguments 0-8 required, 9-14 optional). */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Count-down fallthrough: copy however many positionals were given. */
switch (pos_args) {
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Starting after the last positional, pull remaining args by keyword;
 * a missing required arg (cases 0-8) raises, optionals (9-14) may be
 * absent and get their defaults below. Cases fall through. */
switch (pos_args) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like", 0, 9, 15, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sv);
if (likely(values[2])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like", 0, 9, 15, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a);
if (likely(values[3])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like", 0, 9, 15, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 4:
values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z);
if (likely(values[4])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like", 0, 9, 15, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 5:
values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sz);
if (likely(values[5])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like", 0, 9, 15, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 6:
values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t);
if (likely(values[6])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like", 0, 9, 15, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 7:
values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__st);
if (likely(values[7])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like", 0, 9, 15, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 8:
values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__err);
if (likely(values[8])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like", 0, 9, 15, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 9:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_st);
if (value) { values[9] = value; kw_args--; }
}
case 10:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_sz);
if (value) { values[10] = value; kw_args--; }
}
case 11:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_adaptive);
if (value) { values[11] = value; kw_args--; }
}
case 12:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__simps_err);
if (value) { values[12] = value; kw_args--; }
}
case 13:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__p_outlier);
if (value) { values[13] = value; kw_args--; }
}
case 14:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__w_outlier);
if (value) { values[14] = value; kw_args--; }
}
}
/* Any keyword left unconsumed is unknown or duplicates a positional. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wiener_like") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
/* Convert to C types; each __pyx_PyFloat_AsDouble/__Pyx_PyInt_AsInt may
 * raise, signalled by the -1 sentinel plus PyErr_Occurred(). */
__pyx_v_x = ((PyArrayObject *)values[0]);
__pyx_v_v = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_err = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[9]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[9]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)10);
}
if (values[10]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)10);
}
if (values[11]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[11]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[12]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":50
 *
 * def wiener_like(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, double t,
 *                 double st, double err, int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-8, # <<<<<<<<<<<<<<
 *                 double p_outlier=0, double w_outlier=0):
 *     cdef Py_ssize_t size = x.shape[0]
 */
__pyx_v_simps_err = ((double)1e-8);
}
if (values[13]) {
__pyx_v_p_outlier = __pyx_PyFloat_AsDouble(values[13]); if (unlikely((__pyx_v_p_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":51
 * def wiener_like(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, double t,
 *                 double st, double err, int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-8,
 *                 double p_outlier=0, double w_outlier=0): # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t size = x.shape[0]
 *     cdef Py_ssize_t i
 */
__pyx_v_p_outlier = ((double)0.0);
}
if (values[14]) {
__pyx_v_w_outlier = __pyx_PyFloat_AsDouble(values[14]); if (unlikely((__pyx_v_w_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_w_outlier = ((double)0.0);
}
} else {
/* Path 2: no keywords — only tuple sizes 9..15 are acceptable
 * (9 required args, up to 6 optional). Cases fall through. */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
/* Same conversion sequence as the keyword path above. */
__pyx_v_x = ((PyArrayObject *)values[0]);
__pyx_v_v = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_err = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[9]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[9]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)10);
}
if (values[10]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)10);
}
if (values[11]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[11]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[12]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":50
 *
 * def wiener_like(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, double t,
 *                 double st, double err, int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-8, # <<<<<<<<<<<<<<
 *                 double p_outlier=0, double w_outlier=0):
 *     cdef Py_ssize_t size = x.shape[0]
 */
__pyx_v_simps_err = ((double)1e-8);
}
if (values[13]) {
__pyx_v_p_outlier = __pyx_PyFloat_AsDouble(values[13]); if (unlikely((__pyx_v_p_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":51
 * def wiener_like(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, double t,
 *                 double st, double err, int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-8,
 *                 double p_outlier=0, double w_outlier=0): # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t size = x.shape[0]
 *     cdef Py_ssize_t i
 */
__pyx_v_p_outlier = ((double)0.0);
}
if (values[14]) {
__pyx_v_w_outlier = __pyx_PyFloat_AsDouble(values[14]); if (unlikely((__pyx_v_w_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_w_outlier = ((double)0.0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("wiener_like", 0, 9, 15, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("wfpt.wiener_like", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Verify x is a numpy ndarray (the `np.ndarray[double, ndim=1]` annotation),
 * then hand off to the real implementation. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = __pyx_pf_4wfpt_4wiener_like(__pyx_self, __pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_err, __pyx_v_n_st, __pyx_v_n_sz, __pyx_v_use_adaptive, __pyx_v_simps_err, __pyx_v_p_outlier, __pyx_v_w_outlier);
goto __pyx_L0;
__pyx_L1_error:;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "wfpt.pyx":49
* cdef inline bint p_outlier_in_range(double p_outlier): return (p_outlier >= 0) & (p_outlier <= 1)
*
* def wiener_like(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, double t, # <<<<<<<<<<<<<<
* double st, double err, int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-8,
* double p_outlier=0, double w_outlier=0):
*/
static PyObject *__pyx_pf_4wfpt_4wiener_like(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_err, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err, double __pyx_v_p_outlier, double __pyx_v_w_outlier) {
CYTHON_UNUSED Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_v_i;
double __pyx_v_p;
double __pyx_v_sum_logp;
double __pyx_v_wp_outlier;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x;
__Pyx_Buffer __pyx_pybuffer_x;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
double __pyx_t_8;
struct __pyx_opt_args_4wfpt_full_pdf __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("wiener_like");
__pyx_pybuffer_x.pybuffer.buf = NULL;
__pyx_pybuffer_x.refcount = 0;
__pyx_pybuffernd_x.data = NULL;
__pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
/* "wfpt.pyx":52
* double st, double err, int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-8,
* double p_outlier=0, double w_outlier=0):
* cdef Py_ssize_t size = x.shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t i
* cdef double p
*/
__pyx_v_size = (__pyx_v_x->dimensions[0]);
/* "wfpt.pyx":55
* cdef Py_ssize_t i
* cdef double p
* cdef double sum_logp = 0 # <<<<<<<<<<<<<<
* cdef double wp_outlier = w_outlier * p_outlier
*
*/
__pyx_v_sum_logp = 0.0;
/* "wfpt.pyx":56
* cdef double p
* cdef double sum_logp = 0
* cdef double wp_outlier = w_outlier * p_outlier # <<<<<<<<<<<<<<
*
* if not p_outlier_in_range(p_outlier):
*/
__pyx_v_wp_outlier = (__pyx_v_w_outlier * __pyx_v_p_outlier);
/* "wfpt.pyx":58
* cdef double wp_outlier = w_outlier * p_outlier
*
* if not p_outlier_in_range(p_outlier): # <<<<<<<<<<<<<<
* return -np.inf
*
*/
__pyx_t_1 = (!__pyx_f_4wfpt_p_outlier_in_range(__pyx_v_p_outlier));
if (__pyx_t_1) {
/* "wfpt.pyx":59
*
* if not p_outlier_in_range(p_outlier):
* return -np.inf # <<<<<<<<<<<<<<
*
* for i in prange(size, nogil=True, schedule='dynamic'):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__inf); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyNumber_Negative(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
goto __pyx_L3;
}
__pyx_L3:;
/* "wfpt.pyx":61
* return -np.inf
*
* for i in prange(size, nogil=True, schedule='dynamic'): # <<<<<<<<<<<<<<
* p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
* # If one probability = 0, the log sum will be -Inf
*/
{
#ifdef WITH_THREAD
PyThreadState *_save = NULL;
#endif
Py_UNBLOCK_THREADS
/*try:*/ {
__pyx_t_4 = __pyx_v_size;
if (1 == 0) abort();
{
double __pyx_parallel_temp0 = __PYX_NAN;
double __pyx_parallel_temp1 = __PYX_NAN;
Py_ssize_t __pyx_parallel_temp2 = 0xbad0bad0;
const char *__pyx_parallel_filename; int __pyx_parallel_lineno, __pyx_parallel_clineno;
__pyx_parallel_filename = NULL; __pyx_parallel_lineno = __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
__pyx_t_6 = (__pyx_t_4 - 0) / 1;
if (__pyx_t_6 > 0)
{
__pyx_v_i = 0;
#ifdef _OPENMP
#pragma omp parallel reduction(+:__pyx_v_sum_logp) private(__pyx_t_8, __pyx_t_1, __pyx_t_9, __pyx_t_7) firstprivate(__pyx_t_3, __pyx_t_2) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_p) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) schedule(dynamic)
#endif /* _OPENMP */
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_6; __pyx_t_5++){
if (__pyx_parallel_why < 2)
{
__pyx_v_i = 0 + 1 * __pyx_t_5;
/* Initialize private variables to invalid values */
__pyx_v_p = ((double)__PYX_NAN);
/* "wfpt.pyx":62
*
* for i in prange(size, nogil=True, schedule='dynamic'):
* p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err) # <<<<<<<<<<<<<<
* # If one probability = 0, the log sum will be -Inf
* p = p * (1 - p_outlier) + wp_outlier
*/
__pyx_t_7 = __pyx_v_i;
__pyx_t_9.__pyx_n = 4;
__pyx_t_9.n_st = __pyx_v_n_st;
__pyx_t_9.n_sz = __pyx_v_n_sz;
__pyx_t_9.use_adaptive = __pyx_v_use_adaptive;
__pyx_t_9.simps_err = __pyx_v_simps_err;
__pyx_t_8 = __pyx_f_4wfpt_full_pdf((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_x.diminfo[0].strides)), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_err, 0, &__pyx_t_9);
__pyx_v_p = __pyx_t_8;
/* "wfpt.pyx":64
* p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
* # If one probability = 0, the log sum will be -Inf
* p = p * (1 - p_outlier) + wp_outlier # <<<<<<<<<<<<<<
* if p == 0:
* with gil:
*/
__pyx_v_p = ((__pyx_v_p * (1.0 - __pyx_v_p_outlier)) + __pyx_v_wp_outlier);
/* "wfpt.pyx":65
* # If one probability = 0, the log sum will be -Inf
* p = p * (1 - p_outlier) + wp_outlier
* if p == 0: # <<<<<<<<<<<<<<
* with gil:
* return -np.inf
*/
__pyx_t_1 = (__pyx_v_p == 0.0);
if (__pyx_t_1) {
/* "wfpt.pyx":66
* p = p * (1 - p_outlier) + wp_outlier
* if p == 0:
* with gil: # <<<<<<<<<<<<<<
* return -np.inf
*
*/
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
/*try:*/ {
/* "wfpt.pyx":67
* if p == 0:
* with gil:
* return -np.inf # <<<<<<<<<<<<<<
*
* sum_logp += log(p)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L15;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__inf); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L15;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyNumber_Negative(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L15;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L14;
}
/* "wfpt.pyx":66
* p = p * (1 - p_outlier) + wp_outlier
* if p == 0:
* with gil: # <<<<<<<<<<<<<<
* return -np.inf
*
*/
/*finally:*/ {
int __pyx_why;
__pyx_why = 0; goto __pyx_L16;
__pyx_L14: __pyx_why = 3; goto __pyx_L16;
__pyx_L15: __pyx_why = 4; goto __pyx_L16;
__pyx_L16:;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
switch (__pyx_why) {
case 3: goto __pyx_L10_return;
case 4: goto __pyx_L9_error;
}
}
}
goto __pyx_L11;
}
__pyx_L11:;
/* "wfpt.pyx":69
* return -np.inf
*
* sum_logp += log(p) # <<<<<<<<<<<<<<
*
* return sum_logp
*/
__pyx_v_sum_logp = (__pyx_v_sum_logp + log(__pyx_v_p));
goto __pyx_L19;
__pyx_L10_return:;
__pyx_parallel_why = 3;
goto __pyx_L18;
__pyx_L9_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L18;
__pyx_L18:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_sum_logp;
__pyx_parallel_temp1 = __pyx_v_i;
__pyx_parallel_temp2 = __pyx_v_p;
}
__pyx_L19:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_sum_logp = __pyx_parallel_temp0;
__pyx_v_p = __pyx_parallel_temp1;
__pyx_v_i = __pyx_parallel_temp2;
switch (__pyx_parallel_why) {
case 3: goto __pyx_L4;
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
__Pyx_GIVEREF(__pyx_parallel_exc_type);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L5;
}
}
}
}
/* "wfpt.pyx":61
* return -np.inf
*
* for i in prange(size, nogil=True, schedule='dynamic'): # <<<<<<<<<<<<<<
* p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
* # If one probability = 0, the log sum will be -Inf
*/
/*finally:*/ {
int __pyx_why;
__pyx_why = 0; goto __pyx_L6;
__pyx_L4: __pyx_why = 3; goto __pyx_L6;
__pyx_L5: __pyx_why = 4; goto __pyx_L6;
__pyx_L6:;
Py_BLOCK_THREADS
switch (__pyx_why) {
case 3: goto __pyx_L0;
case 4: goto __pyx_L1_error;
}
}
}
/* "wfpt.pyx":71
* sum_logp += log(p)
*
* return sum_logp # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyFloat_FromDouble(__pyx_v_sum_logp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("wfpt.wiener_like", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * Cython-generated CPython entry point for `wiener_like_multi` (wfpt.pyx:74).
 * Responsibilities:
 *   - accept up to 16 positional and/or keyword arguments;
 *   - enforce the 9 required ones (x, v, sv, a, z, sz, t, st, err);
 *   - fill in defaults for the 7 optional ones (multi=None, n_st=10,
 *     n_sz=10, use_adaptive=1, simps_err=1e-3, p_outlier=0, w_outlier=0);
 *   - convert numeric arguments to C double/int;
 *   - type-check `x` as a numpy.ndarray;
 *   - dispatch to the implementation __pyx_pf_4wfpt_6wiener_like_multi.
 * NOTE(review): machine-generated code -- the case fallthroughs and the
 * goto-based error paths are intentional; do not hand-edit the logic.
 */
static PyObject *__pyx_pw_4wfpt_7wiener_like_multi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_4wfpt_6wiener_like_multi[] = "wiener_like_multi(ndarray x, v, sv, a, z, sz, t, st, double err, multi=None, int n_st=10, int n_sz=10, int use_adaptive=1, double simps_err=0.001, double p_outlier=0, double w_outlier=0)";
static PyMethodDef __pyx_mdef_4wfpt_7wiener_like_multi = {__Pyx_NAMESTR("wiener_like_multi"), (PyCFunction)__pyx_pw_4wfpt_7wiener_like_multi, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4wfpt_6wiener_like_multi)};
static PyObject *__pyx_pw_4wfpt_7wiener_like_multi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_x = 0;
PyObject *__pyx_v_v = 0;
PyObject *__pyx_v_sv = 0;
PyObject *__pyx_v_a = 0;
PyObject *__pyx_v_z = 0;
PyObject *__pyx_v_sz = 0;
PyObject *__pyx_v_t = 0;
PyObject *__pyx_v_st = 0;
double __pyx_v_err;
PyObject *__pyx_v_multi = 0;
int __pyx_v_n_st;
int __pyx_v_n_sz;
int __pyx_v_use_adaptive;
double __pyx_v_simps_err;
double __pyx_v_p_outlier;
double __pyx_v_w_outlier;
/* Interned keyword-name strings in positional order; NULL-terminated. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__v,&__pyx_n_s__sv,&__pyx_n_s__a,&__pyx_n_s__z,&__pyx_n_s__sz,&__pyx_n_s__t,&__pyx_n_s__st,&__pyx_n_s__err,&__pyx_n_s__multi,&__pyx_n_s__n_st,&__pyx_n_s__n_sz,&__pyx_n_s__use_adaptive,&__pyx_n_s__simps_err,&__pyx_n_s__p_outlier,&__pyx_n_s__w_outlier,0};
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("wiener_like_multi (wrapper)");
__pyx_self = __pyx_self; /* self-assignment silences "unused parameter" warnings */
{
/* values[i] collects the object for positional slot i before conversion. */
PyObject* values[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* "wfpt.pyx":74
*
*
* def wiener_like_multi(np.ndarray[double, ndim=1] x, v, sv, a, z, sz, t, st, double err, multi=None, # <<<<<<<<<<<<<<
* int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-3,
* double p_outlier=0, double w_outlier=0):
*/
values[9] = ((PyObject *)Py_None); /* default for `multi` */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Collect positional args; each case intentionally falls through. */
switch (pos_args) {
case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Merge keyword args starting at the first slot not already filled
* positionally. Slots 0-8 are required (missing -> TypeError);
* slots 9-15 are optional. Cases fall through intentionally. */
switch (pos_args) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_multi", 0, 9, 16, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sv);
if (likely(values[2])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_multi", 0, 9, 16, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a);
if (likely(values[3])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_multi", 0, 9, 16, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 4:
values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z);
if (likely(values[4])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_multi", 0, 9, 16, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 5:
values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sz);
if (likely(values[5])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_multi", 0, 9, 16, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 6:
values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t);
if (likely(values[6])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_multi", 0, 9, 16, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 7:
values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__st);
if (likely(values[7])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_multi", 0, 9, 16, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 8:
values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__err);
if (likely(values[8])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_multi", 0, 9, 16, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 9:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__multi);
if (value) { values[9] = value; kw_args--; }
}
case 10:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_st);
if (value) { values[10] = value; kw_args--; }
}
case 11:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_sz);
if (value) { values[11] = value; kw_args--; }
}
case 12:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_adaptive);
if (value) { values[12] = value; kw_args--; }
}
case 13:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__simps_err);
if (value) { values[13] = value; kw_args--; }
}
case 14:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__p_outlier);
if (value) { values[14] = value; kw_args--; }
}
case 15:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__w_outlier);
if (value) { values[15] = value; kw_args--; }
}
}
/* Any keyword left over is either unknown or a duplicate of a
* positional argument; let the helper raise the proper TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wiener_like_multi") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
/* Unpack collected objects and convert scalars to C types. */
__pyx_v_x = ((PyArrayObject *)values[0]);
__pyx_v_v = values[1];
__pyx_v_sv = values[2];
__pyx_v_a = values[3];
__pyx_v_z = values[4];
__pyx_v_sz = values[5];
__pyx_v_t = values[6];
__pyx_v_st = values[7];
__pyx_v_err = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_multi = values[9];
if (values[10]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)10);
}
if (values[11]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[11]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)10);
}
if (values[12]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[12]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[13]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[13]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":75
*
* def wiener_like_multi(np.ndarray[double, ndim=1] x, v, sv, a, z, sz, t, st, double err, multi=None,
* int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-3, # <<<<<<<<<<<<<<
* double p_outlier=0, double w_outlier=0):
* cdef Py_ssize_t size = x.shape[0]
*/
__pyx_v_simps_err = ((double)1e-3);
}
if (values[14]) {
__pyx_v_p_outlier = __pyx_PyFloat_AsDouble(values[14]); if (unlikely((__pyx_v_p_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":76
* def wiener_like_multi(np.ndarray[double, ndim=1] x, v, sv, a, z, sz, t, st, double err, multi=None,
* int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-3,
* double p_outlier=0, double w_outlier=0): # <<<<<<<<<<<<<<
* cdef Py_ssize_t size = x.shape[0]
* cdef Py_ssize_t i
*/
__pyx_v_p_outlier = ((double)0.0);
}
if (values[15]) {
__pyx_v_w_outlier = __pyx_PyFloat_AsDouble(values[15]); if (unlikely((__pyx_v_w_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_w_outlier = ((double)0.0);
}
} else {
/* Fast path: no keyword dict, positional arguments only.
* Anything other than 9..16 positionals is an arity error. */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
/* Same unpack/convert sequence as the keyword path above (generated
* duplicate; keep the two in sync). */
__pyx_v_x = ((PyArrayObject *)values[0]);
__pyx_v_v = values[1];
__pyx_v_sv = values[2];
__pyx_v_a = values[3];
__pyx_v_z = values[4];
__pyx_v_sz = values[5];
__pyx_v_t = values[6];
__pyx_v_st = values[7];
__pyx_v_err = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_multi = values[9];
if (values[10]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)10);
}
if (values[11]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[11]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)10);
}
if (values[12]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[12]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[13]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[13]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":75
*
* def wiener_like_multi(np.ndarray[double, ndim=1] x, v, sv, a, z, sz, t, st, double err, multi=None,
* int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-3, # <<<<<<<<<<<<<<
* double p_outlier=0, double w_outlier=0):
* cdef Py_ssize_t size = x.shape[0]
*/
__pyx_v_simps_err = ((double)1e-3);
}
if (values[14]) {
__pyx_v_p_outlier = __pyx_PyFloat_AsDouble(values[14]); if (unlikely((__pyx_v_p_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":76
* def wiener_like_multi(np.ndarray[double, ndim=1] x, v, sv, a, z, sz, t, st, double err, multi=None,
* int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-3,
* double p_outlier=0, double w_outlier=0): # <<<<<<<<<<<<<<
* cdef Py_ssize_t size = x.shape[0]
* cdef Py_ssize_t i
*/
__pyx_v_p_outlier = ((double)0.0);
}
if (values[15]) {
__pyx_v_w_outlier = __pyx_PyFloat_AsDouble(values[15]); if (unlikely((__pyx_v_w_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_w_outlier = ((double)0.0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:; /* wrong number of positional arguments */
__Pyx_RaiseArgtupleInvalid("wiener_like_multi", 0, 9, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:; /* argument parsing/conversion failed; exception already set */
__Pyx_AddTraceback("wfpt.wiener_like_multi", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Verify `x` is a numpy.ndarray before handing it to the implementation. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = __pyx_pf_4wfpt_6wiener_like_multi(__pyx_self, __pyx_v_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_err, __pyx_v_multi, __pyx_v_n_st, __pyx_v_n_sz, __pyx_v_use_adaptive, __pyx_v_simps_err, __pyx_v_p_outlier, __pyx_v_w_outlier);
goto __pyx_L0;
__pyx_L1_error:;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "wfpt.pyx":74
*
*
* def wiener_like_multi(np.ndarray[double, ndim=1] x, v, sv, a, z, sz, t, st, double err, multi=None, # <<<<<<<<<<<<<<
* int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-3,
* double p_outlier=0, double w_outlier=0):
*/
/*
 * Implementation of `wiener_like_multi` (wfpt.pyx:74-99).
 * Computes the summed log-likelihood of the full Wiener first-passage-time
 * pdf over the 1-D double array `x`, where the parameters named in `multi`
 * vary per trial (their entries are indexed per observation), mixed with a
 * uniform outlier model: p = p*(1 - p_outlier) + w_outlier*p_outlier.
 * Returns a Python float (sum of log(p)); on error returns NULL with an
 * exception set. Machine-generated Cython code: the refcounting, buffer
 * acquire/release pairing, and goto-based cleanup order are significant.
 */
static PyObject *__pyx_pf_4wfpt_6wiener_like_multi(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, PyObject *__pyx_v_v, PyObject *__pyx_v_sv, PyObject *__pyx_v_a, PyObject *__pyx_v_z, PyObject *__pyx_v_sz, PyObject *__pyx_v_t, PyObject *__pyx_v_st, double __pyx_v_err, PyObject *__pyx_v_multi, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err, double __pyx_v_p_outlier, double __pyx_v_w_outlier) {
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_v_i;
double __pyx_v_p;
double __pyx_v_sum_logp;
double __pyx_v_wp_outlier;
PyObject *__pyx_v_params = NULL;
PyObject *__pyx_v_params_iter = NULL;
PyObject *__pyx_v_param = NULL;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x;
__Pyx_Buffer __pyx_pybuffer_x;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
double __pyx_t_2;
double __pyx_t_3;
double __pyx_t_4;
double __pyx_t_5;
double __pyx_t_6;
double __pyx_t_7;
double __pyx_t_8;
double __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
PyObject *(*__pyx_t_15)(PyObject *);
struct __pyx_opt_args_4wfpt_full_pdf __pyx_t_16;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("wiener_like_multi");
/* Acquire a PEP 3118 buffer view of `x` (1-D, double, strided). */
__pyx_pybuffer_x.pybuffer.buf = NULL;
__pyx_pybuffer_x.refcount = 0;
__pyx_pybuffernd_x.data = NULL;
__pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
/* "wfpt.pyx":77
* int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-3,
* double p_outlier=0, double w_outlier=0):
* cdef Py_ssize_t size = x.shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t i
* cdef double p = 0
*/
__pyx_v_size = (__pyx_v_x->dimensions[0]);
/* "wfpt.pyx":79
* cdef Py_ssize_t size = x.shape[0]
* cdef Py_ssize_t i
* cdef double p = 0 # <<<<<<<<<<<<<<
* cdef double sum_logp = 0
* cdef double wp_outlier = w_outlier * p_outlier
*/
__pyx_v_p = 0.0;
/* "wfpt.pyx":80
* cdef Py_ssize_t i
* cdef double p = 0
* cdef double sum_logp = 0 # <<<<<<<<<<<<<<
* cdef double wp_outlier = w_outlier * p_outlier
*
*/
__pyx_v_sum_logp = 0.0;
/* "wfpt.pyx":81
* cdef double p = 0
* cdef double sum_logp = 0
* cdef double wp_outlier = w_outlier * p_outlier # <<<<<<<<<<<<<<
*
* if multi is None:
*/
__pyx_v_wp_outlier = (__pyx_v_w_outlier * __pyx_v_p_outlier);
/* "wfpt.pyx":83
* cdef double wp_outlier = w_outlier * p_outlier
*
* if multi is None: # <<<<<<<<<<<<<<
* return full_pdf(x, v, sv, a, z, sz, t, st, err)
* else:
*/
__pyx_t_1 = (__pyx_v_multi == Py_None);
if (__pyx_t_1) {
/* "wfpt.pyx":84
*
* if multi is None:
* return full_pdf(x, v, sv, a, z, sz, t, st, err) # <<<<<<<<<<<<<<
* else:
* params = {'v':v, 'z':z, 't':t, 'a':a, 'sv':sv, 'sz':sz, 'st':st}
*/
__Pyx_XDECREF(__pyx_r);
/* NOTE(review): this fast path converts the whole ndarray `x` with
* __pyx_PyFloat_AsDouble, which only succeeds for size-1 arrays and
* raises TypeError otherwise. It faithfully mirrors wfpt.pyx:84
* (`return full_pdf(x, ...)` where full_pdf takes a double) -- this
* looks like a latent bug in the .pyx source; confirm upstream. */
__pyx_t_2 = __pyx_PyFloat_AsDouble(((PyObject *)__pyx_v_x)); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_v_v); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_v_sv); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_v_a); if (unlikely((__pyx_t_5 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_6 = __pyx_PyFloat_AsDouble(__pyx_v_z); if (unlikely((__pyx_t_6 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_7 = __pyx_PyFloat_AsDouble(__pyx_v_sz); if (unlikely((__pyx_t_7 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_8 = __pyx_PyFloat_AsDouble(__pyx_v_t); if (unlikely((__pyx_t_8 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_9 = __pyx_PyFloat_AsDouble(__pyx_v_st); if (unlikely((__pyx_t_9 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* Call full_pdf with no optional-args struct (last two args 0, NULL). */
__pyx_t_10 = PyFloat_FromDouble(__pyx_f_4wfpt_full_pdf(__pyx_t_2, __pyx_t_3, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9, __pyx_v_err, 0, NULL)); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_10);
__pyx_r = __pyx_t_10;
__pyx_t_10 = 0;
goto __pyx_L0;
goto __pyx_L3; /* unreachable; generated fall-through stub */
}
/*else*/ {
/* "wfpt.pyx":86
* return full_pdf(x, v, sv, a, z, sz, t, st, err)
* else:
* params = {'v':v, 'z':z, 't':t, 'a':a, 'sv':sv, 'sz':sz, 'st':st} # <<<<<<<<<<<<<<
* params_iter = copy(params)
* for i from 0 <= i < size:
*/
__pyx_t_10 = PyDict_New(); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_10));
if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__v), __pyx_v_v) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__z), __pyx_v_z) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__t), __pyx_v_t) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__a), __pyx_v_a) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__sv), __pyx_v_sv) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__sz), __pyx_v_sz) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__st), __pyx_v_st) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_params = __pyx_t_10;
__pyx_t_10 = 0;
/* "wfpt.pyx":87
* else:
* params = {'v':v, 'z':z, 't':t, 'a':a, 'sv':sv, 'sz':sz, 'st':st}
* params_iter = copy(params) # <<<<<<<<<<<<<<
* for i from 0 <= i < size:
* for param in multi:
*/
/* `copy` is looked up in the module namespace (imported in the .pyx). */
__pyx_t_10 = __Pyx_GetName(__pyx_m, __pyx_n_s__copy); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_10);
__pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_11);
__Pyx_INCREF(((PyObject *)__pyx_v_params));
PyTuple_SET_ITEM(__pyx_t_11, 0, ((PyObject *)__pyx_v_params));
__Pyx_GIVEREF(((PyObject *)__pyx_v_params));
__pyx_t_12 = PyObject_Call(__pyx_t_10, ((PyObject *)__pyx_t_11), NULL); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_11)); __pyx_t_11 = 0;
__pyx_v_params_iter = __pyx_t_12;
__pyx_t_12 = 0;
/* "wfpt.pyx":88
* params = {'v':v, 'z':z, 't':t, 'a':a, 'sv':sv, 'sz':sz, 'st':st}
* params_iter = copy(params)
* for i from 0 <= i < size: # <<<<<<<<<<<<<<
* for param in multi:
* params_iter[param] = params[param][i]
*/
__pyx_t_13 = __pyx_v_size;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_13; __pyx_v_i++) {
/* "wfpt.pyx":89
* params_iter = copy(params)
* for i from 0 <= i < size:
* for param in multi: # <<<<<<<<<<<<<<
* params_iter[param] = params[param][i]
*
*/
/* Fast path for list/tuple `multi`; generic iterator protocol otherwise. */
if (PyList_CheckExact(__pyx_v_multi) || PyTuple_CheckExact(__pyx_v_multi)) {
__pyx_t_12 = __pyx_v_multi; __Pyx_INCREF(__pyx_t_12); __pyx_t_14 = 0;
__pyx_t_15 = NULL;
} else {
__pyx_t_14 = -1; __pyx_t_12 = PyObject_GetIter(__pyx_v_multi); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_15 = Py_TYPE(__pyx_t_12)->tp_iternext;
}
for (;;) {
if (!__pyx_t_15 && PyList_CheckExact(__pyx_t_12)) {
if (__pyx_t_14 >= PyList_GET_SIZE(__pyx_t_12)) break;
__pyx_t_11 = PyList_GET_ITEM(__pyx_t_12, __pyx_t_14); __Pyx_INCREF(__pyx_t_11); __pyx_t_14++;
} else if (!__pyx_t_15 && PyTuple_CheckExact(__pyx_t_12)) {
if (__pyx_t_14 >= PyTuple_GET_SIZE(__pyx_t_12)) break;
__pyx_t_11 = PyTuple_GET_ITEM(__pyx_t_12, __pyx_t_14); __Pyx_INCREF(__pyx_t_11); __pyx_t_14++;
} else {
__pyx_t_11 = __pyx_t_15(__pyx_t_12);
if (unlikely(!__pyx_t_11)) {
if (PyErr_Occurred()) {
/* StopIteration ends the loop; any other exception propagates. */
if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_11);
}
__Pyx_XDECREF(__pyx_v_param);
__pyx_v_param = __pyx_t_11;
__pyx_t_11 = 0;
/* "wfpt.pyx":90
* for i from 0 <= i < size:
* for param in multi:
* params_iter[param] = params[param][i] # <<<<<<<<<<<<<<
*
* p = full_pdf(x[i], params_iter['v'],
*/
/* Overwrite the per-trial value for this parameter in params_iter. */
__pyx_t_11 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_params), __pyx_v_param); if (!__pyx_t_11) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_11);
__pyx_t_10 = __Pyx_GetItemInt(__pyx_t_11, __pyx_v_i, sizeof(Py_ssize_t), PyInt_FromSsize_t); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
if (PyObject_SetItem(__pyx_v_params_iter, __pyx_v_param, __pyx_t_10) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
/* "wfpt.pyx":92
* params_iter[param] = params[param][i]
*
* p = full_pdf(x[i], params_iter['v'], # <<<<<<<<<<<<<<
* params_iter['sv'], params_iter['a'], params_iter['z'],
* params_iter['sz'], params_iter['t'], params_iter['st'],
*/
/* Extract each parameter for trial i as a C double. */
__pyx_t_14 = __pyx_v_i;
__pyx_t_12 = PyObject_GetItem(__pyx_v_params_iter, ((PyObject *)__pyx_n_s__v)); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_9 = __pyx_PyFloat_AsDouble(__pyx_t_12); if (unlikely((__pyx_t_9 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
/* "wfpt.pyx":93
*
* p = full_pdf(x[i], params_iter['v'],
* params_iter['sv'], params_iter['a'], params_iter['z'], # <<<<<<<<<<<<<<
* params_iter['sz'], params_iter['t'], params_iter['st'],
* err, n_st, n_sz, use_adaptive, simps_err)
*/
__pyx_t_12 = PyObject_GetItem(__pyx_v_params_iter, ((PyObject *)__pyx_n_s__sv)); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_8 = __pyx_PyFloat_AsDouble(__pyx_t_12); if (unlikely((__pyx_t_8 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = PyObject_GetItem(__pyx_v_params_iter, ((PyObject *)__pyx_n_s__a)); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_7 = __pyx_PyFloat_AsDouble(__pyx_t_12); if (unlikely((__pyx_t_7 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = PyObject_GetItem(__pyx_v_params_iter, ((PyObject *)__pyx_n_s__z)); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_6 = __pyx_PyFloat_AsDouble(__pyx_t_12); if (unlikely((__pyx_t_6 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
/* "wfpt.pyx":94
* p = full_pdf(x[i], params_iter['v'],
* params_iter['sv'], params_iter['a'], params_iter['z'],
* params_iter['sz'], params_iter['t'], params_iter['st'], # <<<<<<<<<<<<<<
* err, n_st, n_sz, use_adaptive, simps_err)
* p = p * (1 - p_outlier) + wp_outlier
*/
__pyx_t_12 = PyObject_GetItem(__pyx_v_params_iter, ((PyObject *)__pyx_n_s__sz)); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_12); if (unlikely((__pyx_t_5 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = PyObject_GetItem(__pyx_v_params_iter, ((PyObject *)__pyx_n_s__t)); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_12); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = PyObject_GetItem(__pyx_v_params_iter, ((PyObject *)__pyx_n_s__st)); if (!__pyx_t_12) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_12); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
/* "wfpt.pyx":95
* params_iter['sv'], params_iter['a'], params_iter['z'],
* params_iter['sz'], params_iter['t'], params_iter['st'],
* err, n_st, n_sz, use_adaptive, simps_err) # <<<<<<<<<<<<<<
* p = p * (1 - p_outlier) + wp_outlier
* sum_logp += log(p)
*/
/* Pack the 4 optional full_pdf arguments into the options struct. */
__pyx_t_16.__pyx_n = 4;
__pyx_t_16.n_st = __pyx_v_n_st;
__pyx_t_16.n_sz = __pyx_v_n_sz;
__pyx_t_16.use_adaptive = __pyx_v_use_adaptive;
__pyx_t_16.simps_err = __pyx_v_simps_err;
__pyx_t_2 = __pyx_f_4wfpt_full_pdf((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)), __pyx_t_9, __pyx_t_8, __pyx_t_7, __pyx_t_6, __pyx_t_5, __pyx_t_4, __pyx_t_3, __pyx_v_err, 0, &__pyx_t_16);
__pyx_v_p = __pyx_t_2;
/* "wfpt.pyx":96
* params_iter['sz'], params_iter['t'], params_iter['st'],
* err, n_st, n_sz, use_adaptive, simps_err)
* p = p * (1 - p_outlier) + wp_outlier # <<<<<<<<<<<<<<
* sum_logp += log(p)
*
*/
/* Outlier mixture; note log(0) below yields -inf rather than an error here. */
__pyx_v_p = ((__pyx_v_p * (1.0 - __pyx_v_p_outlier)) + __pyx_v_wp_outlier);
/* "wfpt.pyx":97
* err, n_st, n_sz, use_adaptive, simps_err)
* p = p * (1 - p_outlier) + wp_outlier
* sum_logp += log(p) # <<<<<<<<<<<<<<
*
* return sum_logp
*/
__pyx_v_sum_logp = (__pyx_v_sum_logp + log(__pyx_v_p));
}
/* "wfpt.pyx":99
* sum_logp += log(p)
*
* return sum_logp # <<<<<<<<<<<<<<
*
* def gen_rts_from_cdf(double v, double sv, double a, double z, double sz, double t, \
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_12 = PyFloat_FromDouble(__pyx_v_sum_logp); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_r = __pyx_t_12;
__pyx_t_12 = 0;
goto __pyx_L0;
}
__pyx_L3:;
/* Unreachable default return (both branches above return explicitly). */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* Error path: drop temporaries, release the buffer while preserving the
* pending exception, then attach a traceback frame. */
__Pyx_XDECREF(__pyx_t_10);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_XDECREF(__pyx_t_12);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("wfpt.wiener_like_multi", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__pyx_L2:;
/* Common exit: release owned locals and the return-value reference. */
__Pyx_XDECREF(__pyx_v_params);
__Pyx_XDECREF(__pyx_v_params_iter);
__Pyx_XDECREF(__pyx_v_param);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/* Cython-generated module-level plumbing for wfpt.gen_rts_from_cdf:
 * the wrapper prototype, the Python-visible docstring (also serves as the
 * signature shown by help()), and the PyMethodDef entry registered on the
 * module (accepts both positional and keyword arguments). */
static PyObject *__pyx_pw_4wfpt_9gen_rts_from_cdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_4wfpt_8gen_rts_from_cdf[] = "gen_rts_from_cdf(double v, double sv, double a, double z, double sz, double t, double st, int samples=1000, double cdf_lb=-6, double cdf_ub=6, double dt=0.01)";
static PyMethodDef __pyx_mdef_4wfpt_9gen_rts_from_cdf = {__Pyx_NAMESTR("gen_rts_from_cdf"), (PyCFunction)__pyx_pw_4wfpt_9gen_rts_from_cdf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4wfpt_8gen_rts_from_cdf)};
/* Cython-generated argument-unpacking wrapper for wfpt.gen_rts_from_cdf.
 *
 * Converts the Python call (args tuple + optional kwargs dict) into the 11
 * C arguments of the implementation function __pyx_pf_4wfpt_8gen_rts_from_cdf
 * and forwards the call.  Arguments 0-6 (v, sv, a, z, sz, t, st) are
 * required; 7-10 (samples, cdf_lb, cdf_ub, dt) are optional with defaults
 * 1000, -6.0, 6.0 and 1e-2 respectively.
 *
 * Machine-generated code: the switch statements below intentionally fall
 * through (no break between cases) so that a call with N positional
 * arguments fills values[0..N-1] in one pass.  Do not hand-edit; regenerate
 * from wfpt.pyx instead. */
static PyObject *__pyx_pw_4wfpt_9gen_rts_from_cdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
double __pyx_v_v;
double __pyx_v_sv;
double __pyx_v_a;
double __pyx_v_z;
double __pyx_v_sz;
double __pyx_v_t;
double __pyx_v_st;
int __pyx_v_samples;
double __pyx_v_cdf_lb;
double __pyx_v_cdf_ub;
double __pyx_v_dt;
/* Interned parameter-name strings, in declaration order; used for keyword lookup. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__v,&__pyx_n_s__sv,&__pyx_n_s__a,&__pyx_n_s__z,&__pyx_n_s__sz,&__pyx_n_s__t,&__pyx_n_s__st,&__pyx_n_s__samples,&__pyx_n_s__cdf_lb,&__pyx_n_s__cdf_ub,&__pyx_n_s__dt,0};
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("gen_rts_from_cdf (wrapper)");
__pyx_self = __pyx_self;
{
/* values[i] holds a borrowed reference to argument i, from either the
 * args tuple or the kwargs dict; NULL means "use the default". */
PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
/* Slow path: keyword arguments present.  First copy positionals ... */
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
/* fallthrough on every case: N positionals fill slots 0..N-1 */
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* ... then fill the remaining slots from kwargs, starting at the first
 * slot not covered by positionals (again intentional fallthrough).
 * Required parameters (slots 0-6) raise TypeError when missing;
 * optional ones (slots 7-10) are simply left NULL. */
switch (pos_args) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sv);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_rts_from_cdf", 0, 7, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a);
if (likely(values[2])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_rts_from_cdf", 0, 7, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z);
if (likely(values[3])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_rts_from_cdf", 0, 7, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 4:
values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sz);
if (likely(values[4])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_rts_from_cdf", 0, 7, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 5:
values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t);
if (likely(values[5])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_rts_from_cdf", 0, 7, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 6:
values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__st);
if (likely(values[6])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_rts_from_cdf", 0, 7, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 7:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__samples);
if (value) { values[7] = value; kw_args--; }
}
case 8:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__cdf_lb);
if (value) { values[8] = value; kw_args--; }
}
case 9:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__cdf_ub);
if (value) { values[9] = value; kw_args--; }
}
case 10:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dt);
if (value) { values[10] = value; kw_args--; }
}
}
/* Any keyword left unconsumed is unknown or a duplicate of a positional;
 * ParseOptionalKeywords raises the appropriate TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gen_rts_from_cdf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
/* Convert the collected PyObject* slots to C doubles/ints.  The
 * (val == -1 && PyErr_Occurred()) pattern distinguishes a genuine -1
 * result from a conversion failure. */
__pyx_v_v = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
/* Optional arguments: substitute the .pyx declaration defaults when NULL. */
if (values[7]) {
__pyx_v_samples = __Pyx_PyInt_AsInt(values[7]); if (unlikely((__pyx_v_samples == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_samples = ((int)1000);
}
if (values[8]) {
__pyx_v_cdf_lb = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_cdf_lb == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":102
 *
 * def gen_rts_from_cdf(double v, double sv, double a, double z, double sz, double t, \
 * double st, int samples=1000, double cdf_lb=-6, double cdf_ub=6, double dt=1e-2): # <<<<<<<<<<<<<<
 *
 * cdef np.ndarray[double, ndim=1] x = np.arange(cdf_lb, cdf_ub, dt)
 */
__pyx_v_cdf_lb = ((double)-6.0);
}
if (values[9]) {
__pyx_v_cdf_ub = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_cdf_ub == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_cdf_ub = ((double)6.0);
}
if (values[10]) {
__pyx_v_dt = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_dt == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_dt = ((double)1e-2);
}
} else {
/* Fast path: positional-only call.  At least the 7 required arguments
 * must be present (cases 7-11 accepted; fallthrough fills the slots). */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
/* Same conversion sequence as the keyword path above. */
__pyx_v_v = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[7]) {
__pyx_v_samples = __Pyx_PyInt_AsInt(values[7]); if (unlikely((__pyx_v_samples == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_samples = ((int)1000);
}
if (values[8]) {
__pyx_v_cdf_lb = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_cdf_lb == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_cdf_lb = ((double)-6.0);
}
if (values[9]) {
__pyx_v_cdf_ub = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_cdf_ub == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_cdf_ub = ((double)6.0);
}
if (values[10]) {
__pyx_v_dt = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_dt == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_dt = ((double)1e-2);
}
}
goto __pyx_L4_argument_unpacking_done;
/* Wrong number of positional arguments: raise TypeError (expects 7..11). */
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("gen_rts_from_cdf", 0, 7, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
/* Common error exit: record the traceback frame and propagate NULL. */
__pyx_L3_error:;
__Pyx_AddTraceback("wfpt.gen_rts_from_cdf", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* All arguments converted; dispatch to the generated implementation. */
__pyx_r = __pyx_pf_4wfpt_8gen_rts_from_cdf(__pyx_self, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_samples, __pyx_v_cdf_lb, __pyx_v_cdf_ub, __pyx_v_dt);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "wfpt.pyx":101
* return sum_logp
*
* def gen_rts_from_cdf(double v, double sv, double a, double z, double sz, double t, \ # <<<<<<<<<<<<<<
* double st, int samples=1000, double cdf_lb=-6, double cdf_ub=6, double dt=1e-2):
*
*/
static PyObject *__pyx_pf_4wfpt_8gen_rts_from_cdf(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, int __pyx_v_samples, double __pyx_v_cdf_lb, double __pyx_v_cdf_ub, double __pyx_v_dt) {
PyArrayObject *__pyx_v_x = 0;
PyArrayObject *__pyx_v_l_cdf = 0;
double __pyx_v_pdf;
double __pyx_v_rt;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_v_i;
int __pyx_v_idx;
PyArrayObject *__pyx_v_rts = 0;
PyArrayObject *__pyx_v_f = 0;
PyArrayObject *__pyx_v_delay = 0;
__Pyx_LocalBuf_ND __pyx_pybuffernd_rts;
__Pyx_Buffer __pyx_pybuffer_rts;
__Pyx_LocalBuf_ND __pyx_pybuffernd_delay;
__Pyx_Buffer __pyx_pybuffer_delay;
__Pyx_LocalBuf_ND __pyx_pybuffernd_l_cdf;
__Pyx_Buffer __pyx_pybuffer_l_cdf;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x;
__Pyx_Buffer __pyx_pybuffer_x;
__Pyx_LocalBuf_ND __pyx_pybuffernd_f;
__Pyx_Buffer __pyx_pybuffer_f;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyArrayObject *__pyx_t_6 = NULL;
PyArrayObject *__pyx_t_7 = NULL;
long __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
long __pyx_t_13;
int __pyx_t_14;
PyObject *__pyx_t_15 = NULL;
PyObject *__pyx_t_16 = NULL;
PyObject *__pyx_t_17 = NULL;
PyArrayObject *__pyx_t_18 = NULL;
PyArrayObject *__pyx_t_19 = NULL;
int __pyx_t_20;
PyArrayObject *__pyx_t_21 = NULL;
int __pyx_t_22;
double __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("gen_rts_from_cdf");
__pyx_pybuffer_x.pybuffer.buf = NULL;
__pyx_pybuffer_x.refcount = 0;
__pyx_pybuffernd_x.data = NULL;
__pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
__pyx_pybuffer_l_cdf.pybuffer.buf = NULL;
__pyx_pybuffer_l_cdf.refcount = 0;
__pyx_pybuffernd_l_cdf.data = NULL;
__pyx_pybuffernd_l_cdf.rcbuffer = &__pyx_pybuffer_l_cdf;
__pyx_pybuffer_rts.pybuffer.buf = NULL;
__pyx_pybuffer_rts.refcount = 0;
__pyx_pybuffernd_rts.data = NULL;
__pyx_pybuffernd_rts.rcbuffer = &__pyx_pybuffer_rts;
__pyx_pybuffer_f.pybuffer.buf = NULL;
__pyx_pybuffer_f.refcount = 0;
__pyx_pybuffernd_f.data = NULL;
__pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f;
__pyx_pybuffer_delay.pybuffer.buf = NULL;
__pyx_pybuffer_delay.refcount = 0;
__pyx_pybuffernd_delay.data = NULL;
__pyx_pybuffernd_delay.rcbuffer = &__pyx_pybuffer_delay;
/* "wfpt.pyx":104
* double st, int samples=1000, double cdf_lb=-6, double cdf_ub=6, double dt=1e-2):
*
* cdef np.ndarray[double, ndim=1] x = np.arange(cdf_lb, cdf_ub, dt) # <<<<<<<<<<<<<<
* cdef np.ndarray[double, ndim=1] l_cdf = np.empty(x.shape[0], dtype=np.double)
* cdef double pdf, rt
*/
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__arange); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_cdf_lb); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyFloat_FromDouble(__pyx_v_cdf_ub); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyFloat_FromDouble(__pyx_v_dt); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_6 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_x = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_x.rcbuffer->pybuffer.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_6 = 0;
__pyx_v_x = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "wfpt.pyx":105
*
* cdef np.ndarray[double, ndim=1] x = np.arange(cdf_lb, cdf_ub, dt)
* cdef np.ndarray[double, ndim=1] l_cdf = np.empty(x.shape[0], dtype=np.double) # <<<<<<<<<<<<<<
* cdef double pdf, rt
* cdef Py_ssize_t size = x.shape[0]
*/
__pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__empty); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_x->dimensions[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_4));
__pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__double); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PyDict_SetItem(__pyx_t_4, ((PyObject *)__pyx_n_s__dtype), __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_7 = ((PyArrayObject *)__pyx_t_1);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_l_cdf.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_l_cdf = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_l_cdf.rcbuffer->pybuffer.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_pybuffernd_l_cdf.diminfo[0].strides = __pyx_pybuffernd_l_cdf.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_l_cdf.diminfo[0].shape = __pyx_pybuffernd_l_cdf.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_7 = 0;
__pyx_v_l_cdf = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "wfpt.pyx":107
* cdef np.ndarray[double, ndim=1] l_cdf = np.empty(x.shape[0], dtype=np.double)
* cdef double pdf, rt
* cdef Py_ssize_t size = x.shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t i, j
* cdef int idx
*/
__pyx_v_size = (__pyx_v_x->dimensions[0]);
/* "wfpt.pyx":111
* cdef int idx
*
* l_cdf[0] = 0 # <<<<<<<<<<<<<<
* for i from 1 <= i < size:
* pdf = full_pdf(x[i], v, sv, a, z, sz, 0, 0, 1e-4)
*/
__pyx_t_8 = 0;
*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_l_cdf.rcbuffer->pybuffer.buf, __pyx_t_8, __pyx_pybuffernd_l_cdf.diminfo[0].strides) = 0.0;
/* "wfpt.pyx":112
*
* l_cdf[0] = 0
* for i from 1 <= i < size: # <<<<<<<<<<<<<<
* pdf = full_pdf(x[i], v, sv, a, z, sz, 0, 0, 1e-4)
* l_cdf[i] = l_cdf[i-1] + pdf
*/
__pyx_t_9 = __pyx_v_size;
for (__pyx_v_i = 1; __pyx_v_i < __pyx_t_9; __pyx_v_i++) {
/* "wfpt.pyx":113
* l_cdf[0] = 0
* for i from 1 <= i < size:
* pdf = full_pdf(x[i], v, sv, a, z, sz, 0, 0, 1e-4) # <<<<<<<<<<<<<<
* l_cdf[i] = l_cdf[i-1] + pdf
*
*/
__pyx_t_10 = __pyx_v_i;
__pyx_v_pdf = __pyx_f_4wfpt_full_pdf((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_x.diminfo[0].strides)), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, 0.0, 0.0, 1e-4, 0, NULL);
/* "wfpt.pyx":114
* for i from 1 <= i < size:
* pdf = full_pdf(x[i], v, sv, a, z, sz, 0, 0, 1e-4)
* l_cdf[i] = l_cdf[i-1] + pdf # <<<<<<<<<<<<<<
*
* l_cdf /= l_cdf[x.shape[0]-1]
*/
__pyx_t_11 = (__pyx_v_i - 1);
__pyx_t_12 = __pyx_v_i;
*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_l_cdf.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_l_cdf.diminfo[0].strides) = ((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_l_cdf.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_l_cdf.diminfo[0].strides)) + __pyx_v_pdf);
}
/* "wfpt.pyx":116
* l_cdf[i] = l_cdf[i-1] + pdf
*
* l_cdf /= l_cdf[x.shape[0]-1] # <<<<<<<<<<<<<<
*
* cdef np.ndarray[double, ndim=1] rts = np.empty(samples, dtype=np.double)
*/
__pyx_t_13 = ((__pyx_v_x->dimensions[0]) - 1);
__pyx_t_1 = PyFloat_FromDouble((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_l_cdf.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_l_cdf.diminfo[0].strides))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyNumber_InPlaceDivide(((PyObject *)__pyx_v_l_cdf), __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_7 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_l_cdf.rcbuffer->pybuffer);
__pyx_t_14 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_l_cdf.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_14 < 0)) {
PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_l_cdf.rcbuffer->pybuffer, (PyObject*)__pyx_v_l_cdf, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17);
}
}
__pyx_pybuffernd_l_cdf.diminfo[0].strides = __pyx_pybuffernd_l_cdf.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_l_cdf.diminfo[0].shape = __pyx_pybuffernd_l_cdf.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_14 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_7 = 0;
__Pyx_DECREF(((PyObject *)__pyx_v_l_cdf));
__pyx_v_l_cdf = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "wfpt.pyx":118
* l_cdf /= l_cdf[x.shape[0]-1]
*
* cdef np.ndarray[double, ndim=1] rts = np.empty(samples, dtype=np.double) # <<<<<<<<<<<<<<
* cdef np.ndarray[double, ndim=1] f = np.random.rand(samples)
* cdef np.ndarray[double, ndim=1] delay
*/
__pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__empty); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyInt_FromLong(__pyx_v_samples); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_4));
__pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__double); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (PyDict_SetItem(__pyx_t_4, ((PyObject *)__pyx_n_s__dtype), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_18 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_rts.rcbuffer->pybuffer, (PyObject*)__pyx_t_18, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_rts = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_rts.rcbuffer->pybuffer.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_pybuffernd_rts.diminfo[0].strides = __pyx_pybuffernd_rts.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_rts.diminfo[0].shape = __pyx_pybuffernd_rts.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_18 = 0;
__pyx_v_rts = ((PyArrayObject *)__pyx_t_3);
__pyx_t_3 = 0;
/* "wfpt.pyx":119
*
* cdef np.ndarray[double, ndim=1] rts = np.empty(samples, dtype=np.double)
* cdef np.ndarray[double, ndim=1] f = np.random.rand(samples) # <<<<<<<<<<<<<<
* cdef np.ndarray[double, ndim=1] delay
*
*/
__pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__random); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__rand); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyInt_FromLong(__pyx_v_samples); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_19 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_t_19, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_f = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_f.rcbuffer->pybuffer.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_19 = 0;
__pyx_v_f = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "wfpt.pyx":122
* cdef np.ndarray[double, ndim=1] delay
*
* if st!=0: # <<<<<<<<<<<<<<
* delay = (np.random.rand(samples)*st + (t - st/2.))
* for i from 0 <= i < samples:
*/
__pyx_t_20 = (__pyx_v_st != 0.0);
if (__pyx_t_20) {
/* "wfpt.pyx":123
*
* if st!=0:
* delay = (np.random.rand(samples)*st + (t - st/2.)) # <<<<<<<<<<<<<<
* for i from 0 <= i < samples:
* idx = np.searchsorted(l_cdf, f[i])
*/
__pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__random); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyInt_FromLong(__pyx_v_samples); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__pyx_t_3 = PyFloat_FromDouble(__pyx_v_st); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyNumber_Multiply(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyFloat_FromDouble((__pyx_v_t - (__pyx_v_st / 2.))); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyNumber_Add(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_21 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_delay.rcbuffer->pybuffer);
__pyx_t_14 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_delay.rcbuffer->pybuffer, (PyObject*)__pyx_t_21, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_14 < 0)) {
PyErr_Fetch(&__pyx_t_17, &__pyx_t_16, &__pyx_t_15);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_delay.rcbuffer->pybuffer, (PyObject*)__pyx_v_delay, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_17); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_15);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_17, __pyx_t_16, __pyx_t_15);
}
}
__pyx_pybuffernd_delay.diminfo[0].strides = __pyx_pybuffernd_delay.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_delay.diminfo[0].shape = __pyx_pybuffernd_delay.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_14 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_21 = 0;
__pyx_v_delay = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
goto __pyx_L5;
}
__pyx_L5:;
/* "wfpt.pyx":124
* if st!=0:
* delay = (np.random.rand(samples)*st + (t - st/2.))
* for i from 0 <= i < samples: # <<<<<<<<<<<<<<
* idx = np.searchsorted(l_cdf, f[i])
* rt = x[idx]
*/
__pyx_t_14 = __pyx_v_samples;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_14; __pyx_v_i++) {
/* "wfpt.pyx":125
* delay = (np.random.rand(samples)*st + (t - st/2.))
* for i from 0 <= i < samples:
* idx = np.searchsorted(l_cdf, f[i]) # <<<<<<<<<<<<<<
* rt = x[idx]
* if st==0:
*/
__pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__searchsorted); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __pyx_v_i;
__pyx_t_2 = PyFloat_FromDouble((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_f.diminfo[0].strides))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_l_cdf));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_l_cdf));
__Pyx_GIVEREF(((PyObject *)__pyx_v_l_cdf));
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
__pyx_t_22 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_22 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_idx = __pyx_t_22;
/* "wfpt.pyx":126
* for i from 0 <= i < samples:
* idx = np.searchsorted(l_cdf, f[i])
* rt = x[idx] # <<<<<<<<<<<<<<
* if st==0:
* rt = rt + np.sign(rt)*t
*/
__pyx_t_22 = __pyx_v_idx;
__pyx_v_rt = (*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_x.diminfo[0].strides));
/* "wfpt.pyx":127
* idx = np.searchsorted(l_cdf, f[i])
* rt = x[idx]
* if st==0: # <<<<<<<<<<<<<<
* rt = rt + np.sign(rt)*t
* else:
*/
__pyx_t_20 = (__pyx_v_st == 0.0);
if (__pyx_t_20) {
/* "wfpt.pyx":128
* rt = x[idx]
* if st==0:
* rt = rt + np.sign(rt)*t # <<<<<<<<<<<<<<
* else:
* rt = rt + np.sign(rt)*delay[i]
*/
__pyx_t_2 = PyFloat_FromDouble(__pyx_v_rt); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__sign); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyFloat_FromDouble(__pyx_v_rt); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_t); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_23 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_23 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_rt = __pyx_t_23;
goto __pyx_L8;
}
/*else*/ {
/* "wfpt.pyx":130
* rt = rt + np.sign(rt)*t
* else:
* rt = rt + np.sign(rt)*delay[i] # <<<<<<<<<<<<<<
* rts[i] = rt
* return rts
*/
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_rt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__sign); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyFloat_FromDouble(__pyx_v_rt); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
__pyx_t_24 = __pyx_v_i;
__pyx_t_4 = PyFloat_FromDouble((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_delay.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_delay.diminfo[0].strides))); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PyNumber_Multiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyNumber_Add(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_23 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_23 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_rt = __pyx_t_23;
}
__pyx_L8:;
/* "wfpt.pyx":131
* else:
* rt = rt + np.sign(rt)*delay[i]
* rts[i] = rt # <<<<<<<<<<<<<<
* return rts
*
*/
__pyx_t_25 = __pyx_v_i;
*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_rts.rcbuffer->pybuffer.buf, __pyx_t_25, __pyx_pybuffernd_rts.diminfo[0].strides) = __pyx_v_rt;
}
/* "wfpt.pyx":132
* rt = rt + np.sign(rt)*delay[i]
* rts[i] = rt
* return rts # <<<<<<<<<<<<<<
*
* def wiener_like_contaminant(np.ndarray[double, ndim=1] x, np.ndarray[int, ndim=1] cont_x, double v, \
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_rts));
__pyx_r = ((PyObject *)__pyx_v_rts);
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_rts.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_delay.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_l_cdf.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("wfpt.gen_rts_from_cdf", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_rts.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_delay.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_l_cdf.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_x);
__Pyx_XDECREF((PyObject *)__pyx_v_l_cdf);
__Pyx_XDECREF((PyObject *)__pyx_v_rts);
__Pyx_XDECREF((PyObject *)__pyx_v_f);
__Pyx_XDECREF((PyObject *)__pyx_v_delay);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * Cython-generated argument-unpacking wrapper for wfpt.pyx's
 * wiener_like_contaminant().  It accepts 12 required and 4 optional
 * Python-level arguments (positionally and/or by keyword), converts them
 * to C types, type-checks the two ndarray arguments, and dispatches to the
 * C-level implementation __pyx_pf_4wfpt_10wiener_like_contaminant().
 *
 * NOTE(review): the keyword-argument branch previously contained a verbatim
 * duplicate of the values[] -> C-type conversion block that also runs
 * unconditionally after the if/else, so on the keyword path every
 * conversion (__pyx_PyFloat_AsDouble, __Pyx_PyInt_AsInt,
 * __Pyx_PyObject_IsTrue) executed twice -- redundant work, and any
 * side-effecting __float__/__int__/__bool__ on an argument object was
 * invoked twice.  The duplicated inner copy has been removed; the single
 * shared conversion block below now serves both paths, matching standard
 * Cython output.
 */
static PyObject *__pyx_pw_4wfpt_11wiener_like_contaminant(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_4wfpt_10wiener_like_contaminant[] = "wiener_like_contaminant(ndarray x, ndarray cont_x, double v, double sv, double a, double z, double sz, double t, double st, double t_min, double t_max, double err, int n_st=10, int n_sz=10, int use_adaptive=1, double simps_err=1e-08)\nWiener likelihood function where RTs could come from a\n    separate, uniform contaminant distribution.\n\n    Reference: Lee, Vandekerckhove, Navarro, & Tuernlinckx (2007)\n    ";
static PyMethodDef __pyx_mdef_4wfpt_11wiener_like_contaminant = {__Pyx_NAMESTR("wiener_like_contaminant"), (PyCFunction)__pyx_pw_4wfpt_11wiener_like_contaminant, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4wfpt_10wiener_like_contaminant)};
static PyObject *__pyx_pw_4wfpt_11wiener_like_contaminant(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
/* C-level targets for the converted Python arguments. */
PyArrayObject *__pyx_v_x = 0;
PyArrayObject *__pyx_v_cont_x = 0;
double __pyx_v_v;
double __pyx_v_sv;
double __pyx_v_a;
double __pyx_v_z;
double __pyx_v_sz;
double __pyx_v_t;
double __pyx_v_st;
double __pyx_v_t_min;
double __pyx_v_t_max;
double __pyx_v_err;
int __pyx_v_n_st;
int __pyx_v_n_sz;
int __pyx_v_use_adaptive;
double __pyx_v_simps_err;
/* Keyword names in declaration order; index i matches values[i] below. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__cont_x,&__pyx_n_s__v,&__pyx_n_s__sv,&__pyx_n_s__a,&__pyx_n_s__z,&__pyx_n_s__sz,&__pyx_n_s__t,&__pyx_n_s__st,&__pyx_n_s__t_min,&__pyx_n_s__t_max,&__pyx_n_s__err,&__pyx_n_s__n_st,&__pyx_n_s__n_sz,&__pyx_n_s__use_adaptive,&__pyx_n_s__simps_err,0};
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("wiener_like_contaminant (wrapper)");
__pyx_self = __pyx_self;
{
/* values[i] holds the borrowed PyObject* for argument i (16 slots:
 * 12 required + 4 optional); NULL means "not supplied". */
PyObject* values[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* First fill values[] from whatever positional arguments were given
 * (intentional case fallthrough: case N copies slots N-1 .. 0). */
switch (pos_args) {
case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Then look up the remaining arguments by keyword, starting at the
 * first slot not covered positionally (fallthrough again).  Required
 * arguments (slots 0-11) raise on absence; optional slots 12-15 are
 * only probed while unconsumed keywords remain. */
switch (pos_args) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__cont_x);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v);
if (likely(values[2])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sv);
if (likely(values[3])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 4:
values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a);
if (likely(values[4])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 5:
values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z);
if (likely(values[5])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 6:
values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sz);
if (likely(values[6])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 7:
values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t);
if (likely(values[7])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 8:
values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__st);
if (likely(values[8])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 9:
values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t_min);
if (likely(values[9])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 10:
values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t_max);
if (likely(values[10])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 11:
values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__err);
if (likely(values[11])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 12:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_st);
if (value) { values[12] = value; kw_args--; }
}
case 13:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_sz);
if (value) { values[13] = value; kw_args--; }
}
case 14:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_adaptive);
if (value) { values[14] = value; kw_args--; }
}
case 15:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__simps_err);
if (value) { values[15] = value; kw_args--; }
}
}
/* Any keyword left over is either a duplicate of a positional argument
 * or an unknown name -- let the helper raise the proper TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wiener_like_contaminant") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
/* No keywords: purely positional call.  At least the 12 required
 * arguments must be present (fallthrough from case 12 downward). */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
/* Single conversion pass (shared by the positional and keyword paths):
 * turn the collected values[] into C doubles/ints, applying the
 * declared defaults (n_st=10, n_sz=10, use_adaptive=1, simps_err=1e-8)
 * for optional arguments that were not supplied. */
__pyx_v_x = ((PyArrayObject *)values[0]);
__pyx_v_cont_x = ((PyArrayObject *)values[1]);
__pyx_v_v = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t_min = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_t_min == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t_max = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_t_max == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_err = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[12]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[12]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)10);
}
if (values[13]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[13]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)10);
}
if (values[14]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[14]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[15]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[15]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":137 -- default from the .pyx signature: simps_err=1e-8 */
__pyx_v_simps_err = ((double)1e-8);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
/* Wrong number of positional arguments: 12..16 expected. */
__Pyx_RaiseArgtupleInvalid("wiener_like_contaminant", 0, 12, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("wfpt.wiener_like_contaminant", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* x and cont_x must be numpy ndarrays (None not accepted). */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_cont_x), __pyx_ptype_5numpy_ndarray, 1, "cont_x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = __pyx_pf_4wfpt_10wiener_like_contaminant(__pyx_self, __pyx_v_x, __pyx_v_cont_x, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_t_min, __pyx_v_t_max, __pyx_v_err, __pyx_v_n_st, __pyx_v_n_sz, __pyx_v_use_adaptive, __pyx_v_simps_err);
goto __pyx_L0;
__pyx_L1_error:;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "wfpt.pyx":134
* return rts
*
* def wiener_like_contaminant(np.ndarray[double, ndim=1] x, np.ndarray[int, ndim=1] cont_x, double v, \ # <<<<<<<<<<<<<<
* double sv, double a, double z, double sz, double t, double st, double t_min, \
* double t_max, double err, int n_st= 10, int n_sz=10, bint use_adaptive=1, \
*/
/*
 * NOTE(review): Cython-GENERATED code (from wfpt.pyx:134, `wiener_like_contaminant`).
 * Do not hand-edit logic here; change wfpt.pyx and regenerate instead.
 *
 * Implementation body: computes the summed log-likelihood of reaction times `x`
 * under the full DDM pdf, treating entries flagged in `cont_x` as uniform
 * contaminants. The main loop is a `prange(..., nogil=True)` translated to an
 * OpenMP parallel-for with a `reduction(+:sum_logp)`; the `return -np.inf`
 * inside the loop re-acquires the GIL via PyGILState_Ensure before touching
 * Python objects. The `__pyx_parallel_*` machinery propagates an early return
 * or a raised exception out of the parallel region.
 */
static PyObject *__pyx_pf_4wfpt_10wiener_like_contaminant(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_cont_x, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_t_min, double __pyx_v_t_max, double __pyx_v_err, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err) {
CYTHON_UNUSED Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_v_i;
double __pyx_v_p;
double __pyx_v_sum_logp;
int __pyx_v_n_cont;
CYTHON_UNUSED int __pyx_v_pos_cont;
__Pyx_LocalBuf_ND __pyx_pybuffernd_cont_x;
__Pyx_Buffer __pyx_pybuffer_cont_x;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x;
__Pyx_Buffer __pyx_pybuffer_x;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
Py_ssize_t __pyx_t_10;
double __pyx_t_11;
struct __pyx_opt_args_4wfpt_full_pdf __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("wiener_like_contaminant");
/* Acquire typed-memoryview buffers for the two ndarray arguments. */
__pyx_pybuffer_x.pybuffer.buf = NULL;
__pyx_pybuffer_x.refcount = 0;
__pyx_pybuffernd_x.data = NULL;
__pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
__pyx_pybuffer_cont_x.pybuffer.buf = NULL;
__pyx_pybuffer_cont_x.refcount = 0;
__pyx_pybuffernd_cont_x.data = NULL;
__pyx_pybuffernd_cont_x.rcbuffer = &__pyx_pybuffer_cont_x;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cont_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_cont_x, &__Pyx_TypeInfo_int, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_cont_x.diminfo[0].strides = __pyx_pybuffernd_cont_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_cont_x.diminfo[0].shape = __pyx_pybuffernd_cont_x.rcbuffer->pybuffer.shape[0];
/* "wfpt.pyx":143
 * Reference: Lee, Vandekerckhove, Navarro, & Tuernlinckx (2007)
 * """
 * cdef Py_ssize_t size = x.shape[0] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t i
 * cdef double p
 */
__pyx_v_size = (__pyx_v_x->dimensions[0]);
/* "wfpt.pyx":146
 * cdef Py_ssize_t i
 * cdef double p
 * cdef double sum_logp = 0 # <<<<<<<<<<<<<<
 * cdef int n_cont = np.sum(cont_x)
 * cdef int pos_cont = 0
 */
__pyx_v_sum_logp = 0.0;
/* "wfpt.pyx":147
 * cdef double p
 * cdef double sum_logp = 0
 * cdef int n_cont = np.sum(cont_x) # <<<<<<<<<<<<<<
 * cdef int pos_cont = 0
 *
 */
/* n_cont = np.sum(cont_x): the contaminant flags are assumed 0/1 so the
 * sum counts contaminated trials (used for the uniform term at the end). */
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__sum); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)__pyx_v_cont_x));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_cont_x));
__Pyx_GIVEREF(((PyObject *)__pyx_v_cont_x));
__pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_n_cont = __pyx_t_4;
/* "wfpt.pyx":148
 * cdef double sum_logp = 0
 * cdef int n_cont = np.sum(cont_x)
 * cdef int pos_cont = 0 # <<<<<<<<<<<<<<
 *
 * for i in prange(size, nogil=True):
 */
__pyx_v_pos_cont = 0;
/* "wfpt.pyx":150
 * cdef int pos_cont = 0
 *
 * for i in prange(size, nogil=True): # <<<<<<<<<<<<<<
 * if cont_x[i] == 0:
 * p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
 */
/* The GIL is released for the whole parallel region (Py_UNBLOCK_THREADS)
 * and re-acquired only for the early-return / error paths. */
{
#ifdef WITH_THREAD
PyThreadState *_save = NULL;
#endif
Py_UNBLOCK_THREADS
/*try:*/ {
__pyx_t_5 = __pyx_v_size;
if (1 == 0) abort();
{
/* Per-thread snapshots of lastprivate variables, copied back after the
 * region completes (or is aborted by return/exception). */
double __pyx_parallel_temp0 = __PYX_NAN;
Py_ssize_t __pyx_parallel_temp1 = 0xbad0bad0;
double __pyx_parallel_temp2 = __PYX_NAN;
const char *__pyx_parallel_filename; int __pyx_parallel_lineno, __pyx_parallel_clineno;
__pyx_parallel_filename = NULL; __pyx_parallel_lineno = __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
/* __pyx_parallel_why encodes how the region ended: 0 = normal,
 * 3 = `return` inside the loop, 4 = exception. */
int __pyx_parallel_why;
__pyx_parallel_why = 0;
__pyx_t_7 = (__pyx_t_5 - 0) / 1;
if (__pyx_t_7 > 0)
{
__pyx_v_i = 0;
#ifdef _OPENMP
#pragma omp parallel reduction(+:__pyx_v_sum_logp) private(__pyx_t_12, __pyx_t_10, __pyx_t_8, __pyx_t_9, __pyx_t_11) firstprivate(__pyx_t_3, __pyx_t_1) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_p) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_7; __pyx_t_6++){
/* Once any thread sets why >= 2, remaining iterations become no-ops
 * (OpenMP cannot break out of a worksharing loop). */
if (__pyx_parallel_why < 2)
{
__pyx_v_i = 0 + 1 * __pyx_t_6;
/* Initialize private variables to invalid values */
__pyx_v_p = ((double)__PYX_NAN);
/* "wfpt.pyx":151
 *
 * for i in prange(size, nogil=True):
 * if cont_x[i] == 0: # <<<<<<<<<<<<<<
 * p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
 * if p == 0:
 */
__pyx_t_8 = __pyx_v_i;
__pyx_t_9 = ((*__Pyx_BufPtrStrided1d(int *, __pyx_pybuffernd_cont_x.rcbuffer->pybuffer.buf, __pyx_t_8, __pyx_pybuffernd_cont_x.diminfo[0].strides)) == 0);
if (__pyx_t_9) {
/* "wfpt.pyx":152
 * for i in prange(size, nogil=True):
 * if cont_x[i] == 0:
 * p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err) # <<<<<<<<<<<<<<
 * if p == 0:
 * with gil:
 */
__pyx_t_10 = __pyx_v_i;
__pyx_t_12.__pyx_n = 4;
__pyx_t_12.n_st = __pyx_v_n_st;
__pyx_t_12.n_sz = __pyx_v_n_sz;
__pyx_t_12.use_adaptive = __pyx_v_use_adaptive;
__pyx_t_12.simps_err = __pyx_v_simps_err;
__pyx_t_11 = __pyx_f_4wfpt_full_pdf((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_x.diminfo[0].strides)), __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_err, 0, &__pyx_t_12);
__pyx_v_p = __pyx_t_11;
/* "wfpt.pyx":153
 * if cont_x[i] == 0:
 * p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
 * if p == 0: # <<<<<<<<<<<<<<
 * with gil:
 * return -np.inf
 */
/* p == 0 would make log(p) = -inf, so short-circuit the whole
 * likelihood to -inf (requires the GIL to build the Python float). */
__pyx_t_9 = (__pyx_v_p == 0.0);
if (__pyx_t_9) {
/* "wfpt.pyx":154
 * p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
 * if p == 0:
 * with gil: # <<<<<<<<<<<<<<
 * return -np.inf
 * sum_logp += log(p)
 */
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
/*try:*/ {
/* "wfpt.pyx":155
 * if p == 0:
 * with gil:
 * return -np.inf # <<<<<<<<<<<<<<
 * sum_logp += log(p)
 * # If one probability = 0, the log sum will be -Inf
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L15;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__inf); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L15;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_Negative(__pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L15;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L14;
}
/* "wfpt.pyx":154
 * p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
 * if p == 0:
 * with gil: # <<<<<<<<<<<<<<
 * return -np.inf
 * sum_logp += log(p)
 */
/*finally:*/ {
/* Release the GIL again before leaving the `with gil` block,
 * whichever way the block exited (return or error). */
int __pyx_why;
__pyx_why = 0; goto __pyx_L16;
__pyx_L14: __pyx_why = 3; goto __pyx_L16;
__pyx_L15: __pyx_why = 4; goto __pyx_L16;
__pyx_L16:;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
switch (__pyx_why) {
case 3: goto __pyx_L9_return;
case 4: goto __pyx_L8_error;
}
}
}
goto __pyx_L11;
}
__pyx_L11:;
/* "wfpt.pyx":156
 * with gil:
 * return -np.inf
 * sum_logp += log(p) # <<<<<<<<<<<<<<
 * # If one probability = 0, the log sum will be -Inf
 *
 */
__pyx_v_sum_logp = (__pyx_v_sum_logp + log(__pyx_v_p));
goto __pyx_L10;
}
__pyx_L10:;
goto __pyx_L19;
__pyx_L9_return:;
__pyx_parallel_why = 3;
goto __pyx_L18;
__pyx_L8_error:;
/* Record the first exception raised by any thread; later ones are kept. */
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L18;
__pyx_L18:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates1)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_p;
__pyx_parallel_temp1 = __pyx_v_i;
__pyx_parallel_temp2 = __pyx_v_sum_logp;
}
__pyx_L19:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
/* Copy the lastprivate snapshots back and dispatch on how the region ended. */
__pyx_v_p = __pyx_parallel_temp0;
__pyx_v_i = __pyx_parallel_temp1;
__pyx_v_sum_logp = __pyx_parallel_temp2;
switch (__pyx_parallel_why) {
case 3: goto __pyx_L3;
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
__Pyx_GIVEREF(__pyx_parallel_exc_type);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4;
}
}
}
}
/* "wfpt.pyx":150
 * cdef int pos_cont = 0
 *
 * for i in prange(size, nogil=True): # <<<<<<<<<<<<<<
 * if cont_x[i] == 0:
 * p = full_pdf(x[i], v, sv, a, z, sz, t, st, err, n_st, n_sz, use_adaptive, simps_err)
 */
/*finally:*/ {
/* Re-acquire the thread state released by Py_UNBLOCK_THREADS above. */
int __pyx_why;
__pyx_why = 0; goto __pyx_L5;
__pyx_L3: __pyx_why = 3; goto __pyx_L5;
__pyx_L4: __pyx_why = 4; goto __pyx_L5;
__pyx_L5:;
Py_BLOCK_THREADS
switch (__pyx_why) {
case 3: goto __pyx_L0;
case 4: goto __pyx_L1_error;
}
}
}
/* "wfpt.pyx":161
 *
 * # add the log likelihood of the contaminations
 * sum_logp += n_cont*log(0.5 * 1./(t_max-t_min)) # <<<<<<<<<<<<<<
 *
 * return sum_logp
 */
/* Contaminant trials contribute a uniform density over [t_min, t_max],
 * split evenly between the two response boundaries (the 0.5 factor). */
__pyx_v_sum_logp = (__pyx_v_sum_logp + (__pyx_v_n_cont * log(((0.5 * 1.) / (__pyx_v_t_max - __pyx_v_t_min)))));
/* "wfpt.pyx":163
 * sum_logp += n_cont*log(0.5 * 1./(t_max-t_min))
 *
 * return sum_logp # <<<<<<<<<<<<<<
 *
 * def gen_cdf_using_pdf(double v, double sv, double a, double z, double sz, double t, double st, double err,
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyFloat_FromDouble(__pyx_v_sum_logp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* Unreachable fallthrough (function always returns above). */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* Error path: drop temporaries, release buffers while preserving the
 * pending exception, then add this frame to the traceback. */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cont_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("wfpt.wiener_like_contaminant", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cont_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper machinery for wfpt.gen_cdf_using_pdf: forward declaration of
 * the CPython-callable entry point, its docstring, and the PyMethodDef record
 * used when the function is registered on the module. */
static PyObject *__pyx_pw_4wfpt_13gen_cdf_using_pdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_4wfpt_12gen_cdf_using_pdf[] = "gen_cdf_using_pdf(double v, double sv, double a, double z, double sz, double t, double st, double err, int N=500, double time=5.0, int n_st=2, int n_sz=2, int use_adaptive=1, double simps_err=0.001, double p_outlier=0, double w_outlier=0)\n\n    generate cdf vector using the pdf\n    ";
static PyMethodDef __pyx_mdef_4wfpt_13gen_cdf_using_pdf = {__Pyx_NAMESTR("gen_cdf_using_pdf"), (PyCFunction)__pyx_pw_4wfpt_13gen_cdf_using_pdf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4wfpt_12gen_cdf_using_pdf)};
/*
 * NOTE(review): Cython-GENERATED argument-parsing wrapper for
 * gen_cdf_using_pdf (wfpt.pyx:165). Do not hand-edit; regenerate from the
 * .pyx source. Unpacks 8 required and 8 optional arguments (positional or
 * keyword), converts them to C scalars, then delegates to the typed
 * implementation __pyx_pf_4wfpt_12gen_cdf_using_pdf.
 */
static PyObject *__pyx_pw_4wfpt_13gen_cdf_using_pdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
double __pyx_v_v;
double __pyx_v_sv;
double __pyx_v_a;
double __pyx_v_z;
double __pyx_v_sz;
double __pyx_v_t;
double __pyx_v_st;
double __pyx_v_err;
int __pyx_v_N;
double __pyx_v_time;
int __pyx_v_n_st;
int __pyx_v_n_sz;
int __pyx_v_use_adaptive;
double __pyx_v_simps_err;
double __pyx_v_p_outlier;
double __pyx_v_w_outlier;
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__v,&__pyx_n_s__sv,&__pyx_n_s__a,&__pyx_n_s__z,&__pyx_n_s__sz,&__pyx_n_s__t,&__pyx_n_s__st,&__pyx_n_s__err,&__pyx_n_s__N,&__pyx_n_s__time,&__pyx_n_s__n_st,&__pyx_n_s__n_sz,&__pyx_n_s__use_adaptive,&__pyx_n_s__simps_err,&__pyx_n_s__p_outlier,&__pyx_n_s__w_outlier,0};
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("gen_cdf_using_pdf (wrapper)");
/* Self-assignment silences an "unused parameter" warning for module-level functions. */
__pyx_self = __pyx_self;
{
PyObject* values[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
/* Keyword path: first collect positionals (fallthrough switch fills
 * values[] from the back), then look up each remaining name in kwds. */
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Resume the fallthrough switch at the first slot not filled positionally:
 * required args (0-7) raise if absent, optional args (8-15) are fetched
 * only if keywords remain. */
switch (pos_args) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sv);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_cdf_using_pdf", 0, 8, 16, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a);
if (likely(values[2])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_cdf_using_pdf", 0, 8, 16, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z);
if (likely(values[3])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_cdf_using_pdf", 0, 8, 16, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 4:
values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sz);
if (likely(values[4])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_cdf_using_pdf", 0, 8, 16, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 5:
values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t);
if (likely(values[5])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_cdf_using_pdf", 0, 8, 16, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 6:
values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__st);
if (likely(values[6])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_cdf_using_pdf", 0, 8, 16, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 7:
values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__err);
if (likely(values[7])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("gen_cdf_using_pdf", 0, 8, 16, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 8:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__N);
if (value) { values[8] = value; kw_args--; }
}
case 9:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__time);
if (value) { values[9] = value; kw_args--; }
}
case 10:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_st);
if (value) { values[10] = value; kw_args--; }
}
case 11:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n_sz);
if (value) { values[11] = value; kw_args--; }
}
case 12:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_adaptive);
if (value) { values[12] = value; kw_args--; }
}
case 13:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__simps_err);
if (value) { values[13] = value; kw_args--; }
}
case 14:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__p_outlier);
if (value) { values[14] = value; kw_args--; }
}
case 15:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__w_outlier);
if (value) { values[15] = value; kw_args--; }
}
}
/* Any keyword left over at this point is either unknown or a duplicate
 * of a positional argument; ParseOptionalKeywords raises accordingly. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gen_cdf_using_pdf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
/* NOTE(review): this conversion block also appears after the if/else
 * (below, for both branches), so in the keyword path values are converted
 * twice. The second pass re-converts the same objects to the same C values,
 * so behavior is unchanged, but this looks like a duplicated chunk relative
 * to standard Cython output — confirm against the regenerated file. */
__pyx_v_v = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_err = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[8]) {
__pyx_v_N = __Pyx_PyInt_AsInt(values[8]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_N = ((int)500);
}
if (values[9]) {
__pyx_v_time = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_time == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":166
 *
 * def gen_cdf_using_pdf(double v, double sv, double a, double z, double sz, double t, double st, double err,
 * int N=500, double time=5., int n_st=2, int n_sz=2, bint use_adaptive=1, double simps_err=1e-3, # <<<<<<<<<<<<<<
 * double p_outlier=0, double w_outlier=0):
 * """
 */
__pyx_v_time = ((double)5.);
}
if (values[10]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)2);
}
if (values[11]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[11]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)2);
}
if (values[12]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[12]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[13]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[13]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_simps_err = ((double)1e-3);
}
if (values[14]) {
__pyx_v_p_outlier = __pyx_PyFloat_AsDouble(values[14]); if (unlikely((__pyx_v_p_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":167
 * def gen_cdf_using_pdf(double v, double sv, double a, double z, double sz, double t, double st, double err,
 * int N=500, double time=5., int n_st=2, int n_sz=2, bint use_adaptive=1, double simps_err=1e-3,
 * double p_outlier=0, double w_outlier=0): # <<<<<<<<<<<<<<
 * """
 * generate cdf vector using the pdf
 */
__pyx_v_p_outlier = ((double)0.0);
}
if (values[15]) {
__pyx_v_w_outlier = __pyx_PyFloat_AsDouble(values[15]); if (unlikely((__pyx_v_w_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_w_outlier = ((double)0.0);
}
} else {
/* Positional-only path: accept 8..16 positionals, filling values[] from
 * the back via fallthrough so missing trailing slots stay NULL. */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15);
case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14);
case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13);
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
/* Convert collected PyObject* values to C scalars; unset optional slots
 * (NULL) take their declared defaults from the .pyx signature. */
__pyx_v_v = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_v == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sv = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_sv == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_a = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_sz = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_sz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_st = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_st == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_err = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[8]) {
__pyx_v_N = __Pyx_PyInt_AsInt(values[8]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_N = ((int)500);
}
if (values[9]) {
__pyx_v_time = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_time == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":166
 *
 * def gen_cdf_using_pdf(double v, double sv, double a, double z, double sz, double t, double st, double err,
 * int N=500, double time=5., int n_st=2, int n_sz=2, bint use_adaptive=1, double simps_err=1e-3, # <<<<<<<<<<<<<<
 * double p_outlier=0, double w_outlier=0):
 * """
 */
__pyx_v_time = ((double)5.);
}
if (values[10]) {
__pyx_v_n_st = __Pyx_PyInt_AsInt(values[10]); if (unlikely((__pyx_v_n_st == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_st = ((int)2);
}
if (values[11]) {
__pyx_v_n_sz = __Pyx_PyInt_AsInt(values[11]); if (unlikely((__pyx_v_n_sz == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_n_sz = ((int)2);
}
if (values[12]) {
__pyx_v_use_adaptive = __Pyx_PyObject_IsTrue(values[12]); if (unlikely((__pyx_v_use_adaptive == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_use_adaptive = ((int)1);
}
if (values[13]) {
__pyx_v_simps_err = __pyx_PyFloat_AsDouble(values[13]); if (unlikely((__pyx_v_simps_err == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_simps_err = ((double)1e-3);
}
if (values[14]) {
__pyx_v_p_outlier = __pyx_PyFloat_AsDouble(values[14]); if (unlikely((__pyx_v_p_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "wfpt.pyx":167
 * def gen_cdf_using_pdf(double v, double sv, double a, double z, double sz, double t, double st, double err,
 * int N=500, double time=5., int n_st=2, int n_sz=2, bint use_adaptive=1, double simps_err=1e-3,
 * double p_outlier=0, double w_outlier=0): # <<<<<<<<<<<<<<
 * """
 * generate cdf vector using the pdf
 */
__pyx_v_p_outlier = ((double)0.0);
}
if (values[15]) {
__pyx_v_w_outlier = __pyx_PyFloat_AsDouble(values[15]); if (unlikely((__pyx_v_w_outlier == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_w_outlier = ((double)0.0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
/* Wrong positional count: expected between 8 and 16 arguments. */
__Pyx_RaiseArgtupleInvalid("gen_cdf_using_pdf", 0, 8, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("wfpt.gen_cdf_using_pdf", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* All arguments unpacked — call the typed implementation. */
__pyx_r = __pyx_pf_4wfpt_12gen_cdf_using_pdf(__pyx_self, __pyx_v_v, __pyx_v_sv, __pyx_v_a, __pyx_v_z, __pyx_v_sz, __pyx_v_t, __pyx_v_st, __pyx_v_err, __pyx_v_N, __pyx_v_time, __pyx_v_n_st, __pyx_v_n_sz, __pyx_v_use_adaptive, __pyx_v_simps_err, __pyx_v_p_outlier, __pyx_v_w_outlier);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "wfpt.pyx":165
* return sum_logp
*
* def gen_cdf_using_pdf(double v, double sv, double a, double z, double sz, double t, double st, double err, # <<<<<<<<<<<<<<
* int N=500, double time=5., int n_st=2, int n_sz=2, bint use_adaptive=1, double simps_err=1e-3,
* double p_outlier=0, double w_outlier=0):
*/
/* Cython-GENERATED implementation of wfpt.pyx:165 `gen_cdf_using_pdf(...)`.
 * Do not edit by hand — changes belong in wfpt.pyx and will be overwritten
 * on regeneration.  Visible control flow, per the embedded .pyx comments:
 *   1. validate the DDM parameters, raising ValueError if out of support;
 *   2. x = np.linspace(-time, time, 2*N+1);
 *   3. cdf_array = np.empty(x.shape[0], dtype=np.double);
 *   4. cdf_array = pdf_array(x, v, sv, a, z, sz, t, st, err, 0, n_st, n_sz,
 *                            use_adaptive, simps_err, p_outlier, w_outlier);
 *   5. cdf_array[1:] = integrate.cumtrapz(cdf_array);
 *   6. cdf_array /= cdf_array[x.shape[0]-1];
 *   7. return (x, cdf_array).
 * Errors jump to __pyx_L1_error, which releases buffers and temporaries. */
static PyObject *__pyx_pf_4wfpt_12gen_cdf_using_pdf(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_v, double __pyx_v_sv, double __pyx_v_a, double __pyx_v_z, double __pyx_v_sz, double __pyx_v_t, double __pyx_v_st, double __pyx_v_err, int __pyx_v_N, double __pyx_v_time, int __pyx_v_n_st, int __pyx_v_n_sz, int __pyx_v_use_adaptive, double __pyx_v_simps_err, double __pyx_v_p_outlier, double __pyx_v_w_outlier) {
  PyArrayObject *__pyx_v_x = 0;
  PyArrayObject *__pyx_v_cdf_array = 0;
  __Pyx_LocalBuf_ND __pyx_pybuffernd_x;
  __Pyx_Buffer __pyx_pybuffer_x;
  __Pyx_LocalBuf_ND __pyx_pybuffernd_cdf_array;
  __Pyx_Buffer __pyx_pybuffer_cdf_array;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  int __pyx_t_8;
  int __pyx_t_9;
  int __pyx_t_10;
  int __pyx_t_11;
  int __pyx_t_12;
  int __pyx_t_13;
  PyObject *__pyx_t_14 = NULL;
  PyObject *__pyx_t_15 = NULL;
  PyObject *__pyx_t_16 = NULL;
  PyObject *__pyx_t_17 = NULL;
  PyObject *__pyx_t_18 = NULL;
  PyArrayObject *__pyx_t_19 = NULL;
  PyArrayObject *__pyx_t_20 = NULL;
  PyObject *__pyx_t_21 = NULL;
  PyObject *__pyx_t_22 = NULL;
  PyObject *__pyx_t_23 = NULL;
  PyObject *__pyx_t_24 = NULL;
  PyObject *__pyx_t_25 = NULL;
  PyObject *__pyx_t_26 = NULL;
  PyObject *__pyx_t_27 = NULL;
  PyObject *__pyx_t_28 = NULL;
  PyObject *__pyx_t_29 = NULL;
  PyObject *__pyx_t_30 = NULL;
  PyObject *__pyx_t_31 = NULL;
  int __pyx_t_32;
  PyObject *__pyx_t_33 = NULL;
  PyObject *__pyx_t_34 = NULL;
  PyObject *__pyx_t_35 = NULL;
  long __pyx_t_36;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("gen_cdf_using_pdf");
  /* Initialize the typed-buffer bookkeeping for the two ndarray locals
   * so the cleanup path can safely release even never-acquired buffers. */
  __pyx_pybuffer_x.pybuffer.buf = NULL;
  __pyx_pybuffer_x.refcount = 0;
  __pyx_pybuffernd_x.data = NULL;
  __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
  __pyx_pybuffer_cdf_array.pybuffer.buf = NULL;
  __pyx_pybuffer_cdf_array.refcount = 0;
  __pyx_pybuffernd_cdf_array.data = NULL;
  __pyx_pybuffernd_cdf_array.rcbuffer = &__pyx_pybuffer_cdf_array;
  /* "wfpt.pyx":171
 *     generate cdf vector using the pdf
 *     """
 *     if (sv < 0) or (a <=0 ) or (z < 0) or (z > 1) or (sz < 0) or (sz > 1) or (z+sz/2.>1) or \             # <<<<<<<<<<<<<<
 *         (z-sz/2.<0) or (t-st/2.<0) or (t<0) or (st < 0) or not p_outlier_in_range(p_outlier):
 *         raise ValueError("at least one of the parameters is out of the support")
 */
  /* Short-circuiting `or` chain unrolled by Cython into nested ifs:
   * each inner test only runs when all preceding tests were false. */
  __pyx_t_1 = (__pyx_v_sv < 0.0);
  if (!__pyx_t_1) {
    __pyx_t_2 = (__pyx_v_a <= 0.0);
    if (!__pyx_t_2) {
      __pyx_t_3 = (__pyx_v_z < 0.0);
      if (!__pyx_t_3) {
        __pyx_t_4 = (__pyx_v_z > 1.0);
        if (!__pyx_t_4) {
          __pyx_t_5 = (__pyx_v_sz < 0.0);
          if (!__pyx_t_5) {
            __pyx_t_6 = (__pyx_v_sz > 1.0);
            if (!__pyx_t_6) {
              __pyx_t_7 = ((__pyx_v_z + (__pyx_v_sz / 2.)) > 1.0);
              if (!__pyx_t_7) {
                /* "wfpt.pyx":172
 *     """
 *     if (sv < 0) or (a <=0 ) or (z < 0) or (z > 1) or (sz < 0) or (sz > 1) or (z+sz/2.>1) or \
 *         (z-sz/2.<0) or (t-st/2.<0) or (t<0) or (st < 0) or not p_outlier_in_range(p_outlier):             # <<<<<<<<<<<<<<
 *         raise ValueError("at least one of the parameters is out of the support")
 * 
 */
                __pyx_t_8 = ((__pyx_v_z - (__pyx_v_sz / 2.)) < 0.0);
                if (!__pyx_t_8) {
                  __pyx_t_9 = ((__pyx_v_t - (__pyx_v_st / 2.)) < 0.0);
                  if (!__pyx_t_9) {
                    __pyx_t_10 = (__pyx_v_t < 0.0);
                    if (!__pyx_t_10) {
                      __pyx_t_11 = (__pyx_v_st < 0.0);
                      if (!__pyx_t_11) {
                        __pyx_t_12 = (!__pyx_f_4wfpt_p_outlier_in_range(__pyx_v_p_outlier));
                        __pyx_t_13 = __pyx_t_12;
                      } else {
                        __pyx_t_13 = __pyx_t_11;
                      }
                      __pyx_t_11 = __pyx_t_13;
                    } else {
                      __pyx_t_11 = __pyx_t_10;
                    }
                    __pyx_t_10 = __pyx_t_11;
                  } else {
                    __pyx_t_10 = __pyx_t_9;
                  }
                  __pyx_t_9 = __pyx_t_10;
                } else {
                  __pyx_t_9 = __pyx_t_8;
                }
                __pyx_t_8 = __pyx_t_9;
              } else {
                __pyx_t_8 = __pyx_t_7;
              }
              __pyx_t_7 = __pyx_t_8;
            } else {
              __pyx_t_7 = __pyx_t_6;
            }
            __pyx_t_6 = __pyx_t_7;
          } else {
            __pyx_t_6 = __pyx_t_5;
          }
          __pyx_t_5 = __pyx_t_6;
        } else {
          __pyx_t_5 = __pyx_t_4;
        }
        __pyx_t_4 = __pyx_t_5;
      } else {
        __pyx_t_4 = __pyx_t_3;
      }
      __pyx_t_3 = __pyx_t_4;
    } else {
      __pyx_t_3 = __pyx_t_2;
    }
    __pyx_t_2 = __pyx_t_3;
  } else {
    __pyx_t_2 = __pyx_t_1;
  }
  if (__pyx_t_2) {
    /* "wfpt.pyx":173
 *     if (sv < 0) or (a <=0 ) or (z < 0) or (z > 1) or (sz < 0) or (sz > 1) or (z+sz/2.>1) or \
 *         (z-sz/2.<0) or (t-st/2.<0) or (t<0) or (st < 0) or not p_outlier_in_range(p_outlier):
 *         raise ValueError("at least one of the parameters is out of the support")             # <<<<<<<<<<<<<<
 * 
 *     cdef np.ndarray[double, ndim=1] x = np.linspace(-time, time, 2*N+1)
 */
    __pyx_t_14 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_14);
    __Pyx_Raise(__pyx_t_14, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* "wfpt.pyx":175
 *         raise ValueError("at least one of the parameters is out of the support")
 * 
 *     cdef np.ndarray[double, ndim=1] x = np.linspace(-time, time, 2*N+1)             # <<<<<<<<<<<<<<
 *     cdef np.ndarray[double, ndim=1] cdf_array = np.empty(x.shape[0], dtype=np.double)
 *     cdef int idx
 */
  /* Build the (-time, time) grid of 2*N+1 points via np.linspace, then
   * acquire a validated 1-d double buffer view over the result. */
  __pyx_t_14 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_14);
  __pyx_t_15 = PyObject_GetAttr(__pyx_t_14, __pyx_n_s__linspace); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_15);
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
  __pyx_t_14 = PyFloat_FromDouble((-__pyx_v_time)); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_14);
  __pyx_t_16 = PyFloat_FromDouble(__pyx_v_time); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_17 = PyInt_FromLong(((2 * __pyx_v_N) + 1)); if (unlikely(!__pyx_t_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_17);
  __pyx_t_18 = PyTuple_New(3); if (unlikely(!__pyx_t_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_18);
  PyTuple_SET_ITEM(__pyx_t_18, 0, __pyx_t_14);
  __Pyx_GIVEREF(__pyx_t_14);
  PyTuple_SET_ITEM(__pyx_t_18, 1, __pyx_t_16);
  __Pyx_GIVEREF(__pyx_t_16);
  PyTuple_SET_ITEM(__pyx_t_18, 2, __pyx_t_17);
  __Pyx_GIVEREF(__pyx_t_17);
  __pyx_t_14 = 0;
  __pyx_t_16 = 0;
  __pyx_t_17 = 0;
  __pyx_t_17 = PyObject_Call(__pyx_t_15, ((PyObject *)__pyx_t_18), NULL); if (unlikely(!__pyx_t_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_17);
  __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_18)); __pyx_t_18 = 0;
  if (!(likely(((__pyx_t_17) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_17, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_19 = ((PyArrayObject *)__pyx_t_17);
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_19, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
      __pyx_v_x = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_x.rcbuffer->pybuffer.buf = NULL;
      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    } else {__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
    }
  }
  __pyx_t_19 = 0;
  __pyx_v_x = ((PyArrayObject *)__pyx_t_17);
  __pyx_t_17 = 0;
  /* "wfpt.pyx":176
 * 
 *     cdef np.ndarray[double, ndim=1] x = np.linspace(-time, time, 2*N+1)
 *     cdef np.ndarray[double, ndim=1] cdf_array = np.empty(x.shape[0], dtype=np.double)             # <<<<<<<<<<<<<<
 *     cdef int idx
 * 
 */
  /* Allocate an uninitialized double array matching x's length. */
  __pyx_t_17 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_17);
  __pyx_t_18 = PyObject_GetAttr(__pyx_t_17, __pyx_n_s__empty); if (unlikely(!__pyx_t_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_18);
  __Pyx_DECREF(__pyx_t_17); __pyx_t_17 = 0;
  __pyx_t_17 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_x->dimensions[0])); if (unlikely(!__pyx_t_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_17);
  __pyx_t_15 = PyTuple_New(1); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_15);
  PyTuple_SET_ITEM(__pyx_t_15, 0, __pyx_t_17);
  __Pyx_GIVEREF(__pyx_t_17);
  __pyx_t_17 = 0;
  __pyx_t_17 = PyDict_New(); if (unlikely(!__pyx_t_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_t_17));
  __pyx_t_16 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_14 = PyObject_GetAttr(__pyx_t_16, __pyx_n_s__double); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
  if (PyDict_SetItem(__pyx_t_17, ((PyObject *)__pyx_n_s__dtype), __pyx_t_14) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
  __pyx_t_14 = PyObject_Call(__pyx_t_18, ((PyObject *)__pyx_t_15), ((PyObject *)__pyx_t_17)); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_15)); __pyx_t_15 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_17)); __pyx_t_17 = 0;
  if (!(likely(((__pyx_t_14) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_14, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_20 = ((PyArrayObject *)__pyx_t_14);
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cdf_array.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
      __pyx_v_cdf_array = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_cdf_array.rcbuffer->pybuffer.buf = NULL;
      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    } else {__pyx_pybuffernd_cdf_array.diminfo[0].strides = __pyx_pybuffernd_cdf_array.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_cdf_array.diminfo[0].shape = __pyx_pybuffernd_cdf_array.rcbuffer->pybuffer.shape[0];
    }
  }
  __pyx_t_20 = 0;
  __pyx_v_cdf_array = ((PyArrayObject *)__pyx_t_14);
  __pyx_t_14 = 0;
  /* "wfpt.pyx":180
 * 
 *     #compute pdf on the real line
 *     cdf_array = pdf_array(x, v, sv, a, z, sz, t, st, err, 0, n_st, n_sz, use_adaptive, simps_err, p_outlier, w_outlier)             # <<<<<<<<<<<<<<
 * 
 *     #integrate
 */
  /* Box all scalar parameters, call module-level pdf_array() with a
   * 16-element argument tuple, and rebind cdf_array to its result
   * (re-acquiring the buffer with fallback on failure). */
  __pyx_t_14 = __Pyx_GetName(__pyx_m, __pyx_n_s__pdf_array); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_14);
  __pyx_t_17 = PyFloat_FromDouble(__pyx_v_v); if (unlikely(!__pyx_t_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_17);
  __pyx_t_15 = PyFloat_FromDouble(__pyx_v_sv); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_15);
  __pyx_t_18 = PyFloat_FromDouble(__pyx_v_a); if (unlikely(!__pyx_t_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_18);
  __pyx_t_16 = PyFloat_FromDouble(__pyx_v_z); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_16);
  __pyx_t_21 = PyFloat_FromDouble(__pyx_v_sz); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_21);
  __pyx_t_22 = PyFloat_FromDouble(__pyx_v_t); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_22);
  __pyx_t_23 = PyFloat_FromDouble(__pyx_v_st); if (unlikely(!__pyx_t_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_23);
  __pyx_t_24 = PyFloat_FromDouble(__pyx_v_err); if (unlikely(!__pyx_t_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_24);
  __pyx_t_25 = PyInt_FromLong(__pyx_v_n_st); if (unlikely(!__pyx_t_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_25);
  __pyx_t_26 = PyInt_FromLong(__pyx_v_n_sz); if (unlikely(!__pyx_t_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_26);
  __pyx_t_27 = __Pyx_PyBool_FromLong(__pyx_v_use_adaptive); if (unlikely(!__pyx_t_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_27);
  __pyx_t_28 = PyFloat_FromDouble(__pyx_v_simps_err); if (unlikely(!__pyx_t_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_28);
  __pyx_t_29 = PyFloat_FromDouble(__pyx_v_p_outlier); if (unlikely(!__pyx_t_29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_29);
  __pyx_t_30 = PyFloat_FromDouble(__pyx_v_w_outlier); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_30);
  __pyx_t_31 = PyTuple_New(16); if (unlikely(!__pyx_t_31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_31);
  __Pyx_INCREF(((PyObject *)__pyx_v_x));
  PyTuple_SET_ITEM(__pyx_t_31, 0, ((PyObject *)__pyx_v_x));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_x));
  PyTuple_SET_ITEM(__pyx_t_31, 1, __pyx_t_17);
  __Pyx_GIVEREF(__pyx_t_17);
  PyTuple_SET_ITEM(__pyx_t_31, 2, __pyx_t_15);
  __Pyx_GIVEREF(__pyx_t_15);
  PyTuple_SET_ITEM(__pyx_t_31, 3, __pyx_t_18);
  __Pyx_GIVEREF(__pyx_t_18);
  PyTuple_SET_ITEM(__pyx_t_31, 4, __pyx_t_16);
  __Pyx_GIVEREF(__pyx_t_16);
  PyTuple_SET_ITEM(__pyx_t_31, 5, __pyx_t_21);
  __Pyx_GIVEREF(__pyx_t_21);
  PyTuple_SET_ITEM(__pyx_t_31, 6, __pyx_t_22);
  __Pyx_GIVEREF(__pyx_t_22);
  PyTuple_SET_ITEM(__pyx_t_31, 7, __pyx_t_23);
  __Pyx_GIVEREF(__pyx_t_23);
  PyTuple_SET_ITEM(__pyx_t_31, 8, __pyx_t_24);
  __Pyx_GIVEREF(__pyx_t_24);
  __Pyx_INCREF(__pyx_int_0);
  PyTuple_SET_ITEM(__pyx_t_31, 9, __pyx_int_0);
  __Pyx_GIVEREF(__pyx_int_0);
  PyTuple_SET_ITEM(__pyx_t_31, 10, __pyx_t_25);
  __Pyx_GIVEREF(__pyx_t_25);
  PyTuple_SET_ITEM(__pyx_t_31, 11, __pyx_t_26);
  __Pyx_GIVEREF(__pyx_t_26);
  PyTuple_SET_ITEM(__pyx_t_31, 12, __pyx_t_27);
  __Pyx_GIVEREF(__pyx_t_27);
  PyTuple_SET_ITEM(__pyx_t_31, 13, __pyx_t_28);
  __Pyx_GIVEREF(__pyx_t_28);
  PyTuple_SET_ITEM(__pyx_t_31, 14, __pyx_t_29);
  __Pyx_GIVEREF(__pyx_t_29);
  PyTuple_SET_ITEM(__pyx_t_31, 15, __pyx_t_30);
  __Pyx_GIVEREF(__pyx_t_30);
  __pyx_t_17 = 0;
  __pyx_t_15 = 0;
  __pyx_t_18 = 0;
  __pyx_t_16 = 0;
  __pyx_t_21 = 0;
  __pyx_t_22 = 0;
  __pyx_t_23 = 0;
  __pyx_t_24 = 0;
  __pyx_t_25 = 0;
  __pyx_t_26 = 0;
  __pyx_t_27 = 0;
  __pyx_t_28 = 0;
  __pyx_t_29 = 0;
  __pyx_t_30 = 0;
  __pyx_t_30 = PyObject_Call(__pyx_t_14, ((PyObject *)__pyx_t_31), NULL); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_30);
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_31)); __pyx_t_31 = 0;
  if (!(likely(((__pyx_t_30) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_30, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_20 = ((PyArrayObject *)__pyx_t_30);
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cdf_array.rcbuffer->pybuffer);
    __pyx_t_32 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cdf_array.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
    if (unlikely(__pyx_t_32 < 0)) {
      PyErr_Fetch(&__pyx_t_33, &__pyx_t_34, &__pyx_t_35);
      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cdf_array.rcbuffer->pybuffer, (PyObject*)__pyx_v_cdf_array, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
        Py_XDECREF(__pyx_t_33); Py_XDECREF(__pyx_t_34); Py_XDECREF(__pyx_t_35);
        __Pyx_RaiseBufferFallbackError();
      } else {
        PyErr_Restore(__pyx_t_33, __pyx_t_34, __pyx_t_35);
      }
    }
    __pyx_pybuffernd_cdf_array.diminfo[0].strides = __pyx_pybuffernd_cdf_array.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_cdf_array.diminfo[0].shape = __pyx_pybuffernd_cdf_array.rcbuffer->pybuffer.shape[0];
    if (unlikely(__pyx_t_32 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_t_20 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_v_cdf_array));
  __pyx_v_cdf_array = ((PyArrayObject *)__pyx_t_30);
  __pyx_t_30 = 0;
  /* "wfpt.pyx":183
 * 
 *     #integrate
 *     cdf_array[1:] = integrate.cumtrapz(cdf_array)             # <<<<<<<<<<<<<<
 * 
 *     #normalize
 */
  /* Cumulative trapezoidal integration of the pdf, assigned in place to
   * cdf_array[1:] (cumtrapz returns one fewer element than its input). */
  __pyx_t_30 = __Pyx_GetName(__pyx_m, __pyx_n_s__integrate); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_30);
  __pyx_t_31 = PyObject_GetAttr(__pyx_t_30, __pyx_n_s__cumtrapz); if (unlikely(!__pyx_t_31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_31);
  __Pyx_DECREF(__pyx_t_30); __pyx_t_30 = 0;
  __pyx_t_30 = PyTuple_New(1); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_30);
  __Pyx_INCREF(((PyObject *)__pyx_v_cdf_array));
  PyTuple_SET_ITEM(__pyx_t_30, 0, ((PyObject *)__pyx_v_cdf_array));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_cdf_array));
  __pyx_t_14 = PyObject_Call(__pyx_t_31, ((PyObject *)__pyx_t_30), NULL); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_14);
  __Pyx_DECREF(__pyx_t_31); __pyx_t_31 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_30)); __pyx_t_30 = 0;
  if (__Pyx_PySequence_SetSlice(((PyObject *)__pyx_v_cdf_array), 1, PY_SSIZE_T_MAX, __pyx_t_14) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
  /* "wfpt.pyx":186
 * 
 *     #normalize
 *     cdf_array /= cdf_array[x.shape[0]-1]             # <<<<<<<<<<<<<<
 * 
 *     return x, cdf_array
 */
  /* Normalize by the last element (read directly from the typed buffer),
   * then rebind cdf_array to the result of the in-place divide. */
  __pyx_t_36 = ((__pyx_v_x->dimensions[0]) - 1);
  __pyx_t_14 = PyFloat_FromDouble((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_cdf_array.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_cdf_array.diminfo[0].strides))); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_14);
  __pyx_t_30 = __Pyx_PyNumber_InPlaceDivide(((PyObject *)__pyx_v_cdf_array), __pyx_t_14); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_30);
  __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
  if (!(likely(((__pyx_t_30) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_30, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_20 = ((PyArrayObject *)__pyx_t_30);
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cdf_array.rcbuffer->pybuffer);
    __pyx_t_32 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cdf_array.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
    if (unlikely(__pyx_t_32 < 0)) {
      PyErr_Fetch(&__pyx_t_35, &__pyx_t_34, &__pyx_t_33);
      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cdf_array.rcbuffer->pybuffer, (PyObject*)__pyx_v_cdf_array, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
        Py_XDECREF(__pyx_t_35); Py_XDECREF(__pyx_t_34); Py_XDECREF(__pyx_t_33);
        __Pyx_RaiseBufferFallbackError();
      } else {
        PyErr_Restore(__pyx_t_35, __pyx_t_34, __pyx_t_33);
      }
    }
    __pyx_pybuffernd_cdf_array.diminfo[0].strides = __pyx_pybuffernd_cdf_array.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_cdf_array.diminfo[0].shape = __pyx_pybuffernd_cdf_array.rcbuffer->pybuffer.shape[0];
    if (unlikely(__pyx_t_32 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_t_20 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_v_cdf_array));
  __pyx_v_cdf_array = ((PyArrayObject *)__pyx_t_30);
  __pyx_t_30 = 0;
  /* "wfpt.pyx":188
 *     cdf_array /= cdf_array[x.shape[0]-1]
 * 
 *     return x, cdf_array             # <<<<<<<<<<<<<<
 * 
 * def split_cdf(np.ndarray[double, ndim=1] x, np.ndarray[double, ndim=1] data):
 */
  /* Build and return the (x, cdf_array) tuple as the function result. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_30 = PyTuple_New(2); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_30);
  __Pyx_INCREF(((PyObject *)__pyx_v_x));
  PyTuple_SET_ITEM(__pyx_t_30, 0, ((PyObject *)__pyx_v_x));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_x));
  __Pyx_INCREF(((PyObject *)__pyx_v_cdf_array));
  PyTuple_SET_ITEM(__pyx_t_30, 1, ((PyObject *)__pyx_v_cdf_array));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_cdf_array));
  __pyx_r = ((PyObject *)__pyx_t_30);
  __pyx_t_30 = 0;
  goto __pyx_L0;
  /* Unreachable implicit `return None` emitted by Cython for every def. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  /* Error path: drop all temporaries, release buffers (preserving the
   * pending exception across the release), and record a traceback entry. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_14);
  __Pyx_XDECREF(__pyx_t_15);
  __Pyx_XDECREF(__pyx_t_16);
  __Pyx_XDECREF(__pyx_t_17);
  __Pyx_XDECREF(__pyx_t_18);
  __Pyx_XDECREF(__pyx_t_21);
  __Pyx_XDECREF(__pyx_t_22);
  __Pyx_XDECREF(__pyx_t_23);
  __Pyx_XDECREF(__pyx_t_24);
  __Pyx_XDECREF(__pyx_t_25);
  __Pyx_XDECREF(__pyx_t_26);
  __Pyx_XDECREF(__pyx_t_27);
  __Pyx_XDECREF(__pyx_t_28);
  __Pyx_XDECREF(__pyx_t_29);
  __Pyx_XDECREF(__pyx_t_30);
  __Pyx_XDECREF(__pyx_t_31);
  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cdf_array.rcbuffer->pybuffer);
    __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
  __Pyx_AddTraceback("wfpt.gen_cdf_using_pdf", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  goto __pyx_L2;
  /* Success path: release buffers normally, then shared cleanup. */
  __pyx_L0:;
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cdf_array.rcbuffer->pybuffer);
  __pyx_L2:;
  __Pyx_XDECREF((PyObject *)__pyx_v_x);
  __Pyx_XDECREF((PyObject *)__pyx_v_cdf_array);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
/* Cython-GENERATED METH_VARARGS|METH_KEYWORDS wrapper for wfpt.pyx:190
 * `split_cdf(x, data)`.  Do not edit by hand.  Unpacks the two required
 * arguments from args/kwargs, type-checks them as numpy ndarrays, and
 * delegates to __pyx_pf_4wfpt_14split_cdf.  Bad arity or keywords raise
 * TypeError via __Pyx_RaiseArgtupleInvalid / ParseOptionalKeywords. */
static PyObject *__pyx_pw_4wfpt_15split_cdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_4wfpt_14split_cdf[] = "split_cdf(ndarray x, ndarray data)";
static PyMethodDef __pyx_mdef_4wfpt_15split_cdf = {__Pyx_NAMESTR("split_cdf"), (PyCFunction)__pyx_pw_4wfpt_15split_cdf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4wfpt_14split_cdf)};
static PyObject *__pyx_pw_4wfpt_15split_cdf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_x = 0;
  PyArrayObject *__pyx_v_data = 0;
  static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__data,0};
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("split_cdf (wrapper)");
  __pyx_self = __pyx_self;
  {
    PyObject* values[2] = {0,0};
    if (unlikely(__pyx_kwds)) {
      /* Keyword path: positional args fill `values` first (switch cases
       * deliberately fall through), then missing slots come from kwargs. */
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x);
        if (likely(values[0])) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case  1:
        values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__data);
        if (likely(values[1])) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("split_cdf", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
      }
      if (unlikely(kw_args > 0)) {
        /* Leftover keywords are either duplicates or unknown names. */
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "split_cdf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
      __pyx_v_x = ((PyArrayObject *)values[0]);
      __pyx_v_data = ((PyArrayObject *)values[1]);
    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast positional-only path: exactly two arguments. */
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
    }
    __pyx_v_x = ((PyArrayObject *)values[0]);
    __pyx_v_data = ((PyArrayObject *)values[1]);
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("split_cdf", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("wfpt.split_cdf", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Both arguments must be numpy ndarrays (None not accepted: arg 4 == 0). */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_data), __pyx_ptype_5numpy_ndarray, 1, "data", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_r = __pyx_pf_4wfpt_14split_cdf(__pyx_self, __pyx_v_x, __pyx_v_data);
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "wfpt.pyx":190
* return x, cdf_array
*
* def split_cdf(np.ndarray[double, ndim=1] x, np.ndarray[double, ndim=1] data): # <<<<<<<<<<<<<<
*
* #get length of data
*/
/*
 * Cython-generated implementation of wfpt.split_cdf(x, data).
 *
 * Given two 1-D double ndarrays `x` and `data` (a CDF sampled over a
 * symmetric grid), computes N = (len(data)-1)/2 and returns the 4-tuple
 * (x_lb, lb, x_ub, ub), where:
 *   x_lb = -x[:N][::-1]                                   (reversed, negated lower-bound axis)
 *   lb   = np.cumsum(np.concatenate([[0], -np.diff(data[:N][::-1])]))
 *   x_ub = x[N+1:]
 *   ub   = data[N+1:] - data[N+1]                         (rebased so ub starts at 0)
 *
 * NOTE(review): this is machine-generated code (Cython ~0.1x output of
 * wfpt.pyx lines 190-206). Statement order is significant for refcounting
 * and the goto-based error unwinding; do not hand-edit logic — regenerate
 * from the .pyx instead. Comments below only annotate the major phases.
 */
static PyObject *__pyx_pf_4wfpt_14split_cdf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_data) {
  int __pyx_v_N;
  PyArrayObject *__pyx_v_x_lb = 0;
  PyArrayObject *__pyx_v_lb = 0;
  PyArrayObject *__pyx_v_x_ub = 0;
  PyArrayObject *__pyx_v_ub = 0;
  /* Buffer bookkeeping for each typed ndarray local/argument (Py_buffer + stride/shape cache). */
  __Pyx_LocalBuf_ND __pyx_pybuffernd_lb;
  __Pyx_Buffer __pyx_pybuffer_lb;
  __Pyx_LocalBuf_ND __pyx_pybuffernd_x_ub;
  __Pyx_Buffer __pyx_pybuffer_x_ub;
  __Pyx_LocalBuf_ND __pyx_pybuffernd_x;
  __Pyx_Buffer __pyx_pybuffer_x;
  __Pyx_LocalBuf_ND __pyx_pybuffernd_data;
  __Pyx_Buffer __pyx_pybuffer_data;
  __Pyx_LocalBuf_ND __pyx_pybuffernd_x_lb;
  __Pyx_Buffer __pyx_pybuffer_x_lb;
  __Pyx_LocalBuf_ND __pyx_pybuffernd_ub;
  __Pyx_Buffer __pyx_pybuffer_ub;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* Temporaries (__pyx_t_*) hold intermediate owned references / scratch values. */
  Py_ssize_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyArrayObject *__pyx_t_4 = NULL;
  PyArrayObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  int __pyx_t_10;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  PyObject *__pyx_t_13 = NULL;
  PyArrayObject *__pyx_t_14 = NULL;
  PyArrayObject *__pyx_t_15 = NULL;
  long __pyx_t_16;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("split_cdf");
  /* Initialize all buffer records to "not acquired" so the unwind path can
   * safely release them whether or not acquisition happened. */
  __pyx_pybuffer_x_lb.pybuffer.buf = NULL;
  __pyx_pybuffer_x_lb.refcount = 0;
  __pyx_pybuffernd_x_lb.data = NULL;
  __pyx_pybuffernd_x_lb.rcbuffer = &__pyx_pybuffer_x_lb;
  __pyx_pybuffer_lb.pybuffer.buf = NULL;
  __pyx_pybuffer_lb.refcount = 0;
  __pyx_pybuffernd_lb.data = NULL;
  __pyx_pybuffernd_lb.rcbuffer = &__pyx_pybuffer_lb;
  __pyx_pybuffer_x_ub.pybuffer.buf = NULL;
  __pyx_pybuffer_x_ub.refcount = 0;
  __pyx_pybuffernd_x_ub.data = NULL;
  __pyx_pybuffernd_x_ub.rcbuffer = &__pyx_pybuffer_x_ub;
  __pyx_pybuffer_ub.pybuffer.buf = NULL;
  __pyx_pybuffer_ub.refcount = 0;
  __pyx_pybuffernd_ub.data = NULL;
  __pyx_pybuffernd_ub.rcbuffer = &__pyx_pybuffer_ub;
  __pyx_pybuffer_x.pybuffer.buf = NULL;
  __pyx_pybuffer_x.refcount = 0;
  __pyx_pybuffernd_x.data = NULL;
  __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
  __pyx_pybuffer_data.pybuffer.buf = NULL;
  __pyx_pybuffer_data.refcount = 0;
  __pyx_pybuffernd_data.data = NULL;
  __pyx_pybuffernd_data.rcbuffer = &__pyx_pybuffer_data;
  /* Acquire and validate 1-D double buffer views on the two arguments. */
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_data.rcbuffer->pybuffer, (PyObject*)__pyx_v_data, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_pybuffernd_data.diminfo[0].strides = __pyx_pybuffernd_data.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_data.diminfo[0].shape = __pyx_pybuffernd_data.rcbuffer->pybuffer.shape[0];

  /* "wfpt.pyx":193
 * 
 * #get length of data
 *     cdef int N = (len(data) -1) / 2             # <<<<<<<<<<<<<<
 * 
 *     # lower bound is reversed
 */
  __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_data)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_v_N = ((__pyx_t_1 - 1) / 2);

  /* "wfpt.pyx":196
 * 
 *     # lower bound is reversed
 *     cdef np.ndarray[double, ndim=1] x_lb = -x[:N][::-1]             # <<<<<<<<<<<<<<
 *     cdef np.ndarray[double, ndim=1] lb = data[:N][::-1]
 *     # lower bound is cumulative in the wrong direction
 */
  /* x_lb: slice x[:N], apply the cached [::-1] slice constant, then negate. */
  __pyx_t_2 = __Pyx_PySequence_GetSlice(((PyObject *)__pyx_v_x), 0, __pyx_v_N); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyObject_GetItem(__pyx_t_2, __pyx_k_slice_3); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = PyNumber_Negative(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_4 = ((PyArrayObject *)__pyx_t_2);
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_lb.rcbuffer->pybuffer, (PyObject*)__pyx_t_4, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
      __pyx_v_x_lb = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_x_lb.rcbuffer->pybuffer.buf = NULL;
      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    } else {__pyx_pybuffernd_x_lb.diminfo[0].strides = __pyx_pybuffernd_x_lb.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x_lb.diminfo[0].shape = __pyx_pybuffernd_x_lb.rcbuffer->pybuffer.shape[0];
    }
  }
  __pyx_t_4 = 0;
  __pyx_v_x_lb = ((PyArrayObject *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "wfpt.pyx":197
 *     # lower bound is reversed
 *     cdef np.ndarray[double, ndim=1] x_lb = -x[:N][::-1]
 *     cdef np.ndarray[double, ndim=1] lb = data[:N][::-1]             # <<<<<<<<<<<<<<
 *     # lower bound is cumulative in the wrong direction
 *     lb = np.cumsum(np.concatenate([np.array([0]), -np.diff(lb)]))
 */
  __pyx_t_2 = __Pyx_PySequence_GetSlice(((PyObject *)__pyx_v_data), 0, __pyx_v_N); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyObject_GetItem(__pyx_t_2, __pyx_k_slice_4); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_5 = ((PyArrayObject *)__pyx_t_3);
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_lb.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
      __pyx_v_lb = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_lb.rcbuffer->pybuffer.buf = NULL;
      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    } else {__pyx_pybuffernd_lb.diminfo[0].strides = __pyx_pybuffernd_lb.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_lb.diminfo[0].shape = __pyx_pybuffernd_lb.rcbuffer->pybuffer.shape[0];
    }
  }
  __pyx_t_5 = 0;
  __pyx_v_lb = ((PyArrayObject *)__pyx_t_3);
  __pyx_t_3 = 0;

  /* "wfpt.pyx":199
 *     cdef np.ndarray[double, ndim=1] lb = data[:N][::-1]
 *     # lower bound is cumulative in the wrong direction
 *     lb = np.cumsum(np.concatenate([np.array([0]), -np.diff(lb)]))             # <<<<<<<<<<<<<<
 * 
 *     cdef np.ndarray[double, ndim=1] x_ub = x[N+1:]
 */
  /* Rebuild lb via the numpy Python API: look up np.cumsum / np.concatenate /
   * np.array / np.diff by attribute, build the argument tuples, and call. */
  __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__cumsum); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_6 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__concatenate); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_7 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* np.array([0]) — the leading zero element of the concatenation. */
  __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_INCREF(__pyx_int_0);
  PyList_SET_ITEM(__pyx_t_3, 0, __pyx_int_0);
  __Pyx_GIVEREF(__pyx_int_0);
  __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_8);
  PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_t_3));
  __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
  __pyx_t_3 = 0;
  __pyx_t_3 = PyObject_Call(__pyx_t_7, ((PyObject *)__pyx_t_8), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0;
  /* -np.diff(lb) — negated first differences of the reversed lower half. */
  __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_7 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__diff); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
  __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_INCREF(((PyObject *)__pyx_v_lb));
  PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_v_lb));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_lb));
  __pyx_t_9 = PyObject_Call(__pyx_t_7, ((PyObject *)__pyx_t_8), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0;
  __pyx_t_8 = PyNumber_Negative(__pyx_t_9); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  /* [np.array([0]), -np.diff(lb)] — list argument for np.concatenate. */
  __pyx_t_9 = PyList_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_9);
  PyList_SET_ITEM(__pyx_t_9, 0, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  PyList_SET_ITEM(__pyx_t_9, 1, __pyx_t_8);
  __Pyx_GIVEREF(__pyx_t_8);
  __pyx_t_3 = 0;
  __pyx_t_8 = 0;
  __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_8);
  PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_t_9));
  __Pyx_GIVEREF(((PyObject *)__pyx_t_9));
  __pyx_t_9 = 0;
  __pyx_t_9 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_8), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0;
  __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_8);
  PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_9);
  __Pyx_GIVEREF(__pyx_t_9);
  __pyx_t_9 = 0;
  __pyx_t_9 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_8), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0;
  if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_5 = ((PyArrayObject *)__pyx_t_9);
  /* Rebind the lb buffer view to the new array; on failure, try to restore
   * the old view so the error path sees a consistent state. */
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_lb.rcbuffer->pybuffer);
    __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_lb.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
    if (unlikely(__pyx_t_10 < 0)) {
      PyErr_Fetch(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13);
      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_lb.rcbuffer->pybuffer, (PyObject*)__pyx_v_lb, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
        Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13);
        __Pyx_RaiseBufferFallbackError();
      } else {
        PyErr_Restore(__pyx_t_11, __pyx_t_12, __pyx_t_13);
      }
    }
    __pyx_pybuffernd_lb.diminfo[0].strides = __pyx_pybuffernd_lb.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_lb.diminfo[0].shape = __pyx_pybuffernd_lb.rcbuffer->pybuffer.shape[0];
    if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_t_5 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_v_lb));
  __pyx_v_lb = ((PyArrayObject *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "wfpt.pyx":201
 *     lb = np.cumsum(np.concatenate([np.array([0]), -np.diff(lb)]))
 * 
 *     cdef np.ndarray[double, ndim=1] x_ub = x[N+1:]             # <<<<<<<<<<<<<<
 *     cdef np.ndarray[double, ndim=1] ub = data[N+1:]
 *     # ub does not start at 0
 */
  __pyx_t_9 = __Pyx_PySequence_GetSlice(((PyObject *)__pyx_v_x), (__pyx_v_N + 1), PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_9);
  if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_14 = ((PyArrayObject *)__pyx_t_9);
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_ub.rcbuffer->pybuffer, (PyObject*)__pyx_t_14, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
      __pyx_v_x_ub = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_x_ub.rcbuffer->pybuffer.buf = NULL;
      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    } else {__pyx_pybuffernd_x_ub.diminfo[0].strides = __pyx_pybuffernd_x_ub.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x_ub.diminfo[0].shape = __pyx_pybuffernd_x_ub.rcbuffer->pybuffer.shape[0];
    }
  }
  __pyx_t_14 = 0;
  __pyx_v_x_ub = ((PyArrayObject *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "wfpt.pyx":202
 * 
 *     cdef np.ndarray[double, ndim=1] x_ub = x[N+1:]
 *     cdef np.ndarray[double, ndim=1] ub = data[N+1:]             # <<<<<<<<<<<<<<
 *     # ub does not start at 0
 *     ub -= ub[0]
 */
  __pyx_t_9 = __Pyx_PySequence_GetSlice(((PyObject *)__pyx_v_data), (__pyx_v_N + 1), PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_9);
  if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_15 = ((PyArrayObject *)__pyx_t_9);
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_ub.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
      __pyx_v_ub = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_ub.rcbuffer->pybuffer.buf = NULL;
      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    } else {__pyx_pybuffernd_ub.diminfo[0].strides = __pyx_pybuffernd_ub.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_ub.diminfo[0].shape = __pyx_pybuffernd_ub.rcbuffer->pybuffer.shape[0];
    }
  }
  __pyx_t_15 = 0;
  __pyx_v_ub = ((PyArrayObject *)__pyx_t_9);
  __pyx_t_9 = 0;

  /* "wfpt.pyx":204
 *     cdef np.ndarray[double, ndim=1] ub = data[N+1:]
 *     # ub does not start at 0
 *     ub -= ub[0]             # <<<<<<<<<<<<<<
 * 
 *     return (x_lb, lb, x_ub, ub)
 */
  /* Read ub[0] straight from the acquired buffer, then in-place subtract
   * (numpy returns the same array object for ndarray -= scalar). */
  __pyx_t_16 = 0;
  __pyx_t_9 = PyFloat_FromDouble((*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_ub.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_ub.diminfo[0].strides))); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_9);
  __pyx_t_8 = PyNumber_InPlaceSubtract(((PyObject *)__pyx_v_ub), __pyx_t_9); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  if (!(likely(((__pyx_t_8) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_8, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_15 = ((PyArrayObject *)__pyx_t_8);
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_ub.rcbuffer->pybuffer);
    __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_ub.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
    if (unlikely(__pyx_t_10 < 0)) {
      PyErr_Fetch(&__pyx_t_13, &__pyx_t_12, &__pyx_t_11);
      if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_ub.rcbuffer->pybuffer, (PyObject*)__pyx_v_ub, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
        Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11);
        __Pyx_RaiseBufferFallbackError();
      } else {
        PyErr_Restore(__pyx_t_13, __pyx_t_12, __pyx_t_11);
      }
    }
    __pyx_pybuffernd_ub.diminfo[0].strides = __pyx_pybuffernd_ub.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_ub.diminfo[0].shape = __pyx_pybuffernd_ub.rcbuffer->pybuffer.shape[0];
    if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_t_15 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_v_ub));
  __pyx_v_ub = ((PyArrayObject *)__pyx_t_8);
  __pyx_t_8 = 0;

  /* "wfpt.pyx":206
 *     ub -= ub[0]
 * 
 *     return (x_lb, lb, x_ub, ub)             # <<<<<<<<<<<<<<
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_8 = PyTuple_New(4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_INCREF(((PyObject *)__pyx_v_x_lb));
  PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_v_x_lb));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_x_lb));
  __Pyx_INCREF(((PyObject *)__pyx_v_lb));
  PyTuple_SET_ITEM(__pyx_t_8, 1, ((PyObject *)__pyx_v_lb));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_lb));
  __Pyx_INCREF(((PyObject *)__pyx_v_x_ub));
  PyTuple_SET_ITEM(__pyx_t_8, 2, ((PyObject *)__pyx_v_x_ub));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_x_ub));
  __Pyx_INCREF(((PyObject *)__pyx_v_ub));
  PyTuple_SET_ITEM(__pyx_t_8, 3, ((PyObject *)__pyx_v_ub));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_ub));
  __pyx_r = ((PyObject *)__pyx_t_8);
  __pyx_t_8 = 0;
  goto __pyx_L0;
  /* Unreachable default return emitted by the code generator. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  /* Error path: drop temporaries, release all buffer views while preserving
   * the pending exception, record the traceback, and return NULL. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_lb.rcbuffer->pybuffer);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_ub.rcbuffer->pybuffer);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_data.rcbuffer->pybuffer);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_lb.rcbuffer->pybuffer);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_ub.rcbuffer->pybuffer);
  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
  __Pyx_AddTraceback("wfpt.split_cdf", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  goto __pyx_L2;
  /* Success path: release buffer views (the returned tuple owns the arrays). */
  __pyx_L0:;
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_lb.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_ub.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_data.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_lb.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_ub.rcbuffer->pybuffer);
  __pyx_L2:;
  __Pyx_XDECREF((PyObject *)__pyx_v_x_lb);
  __Pyx_XDECREF((PyObject *)__pyx_v_lb);
  __Pyx_XDECREF((PyObject *)__pyx_v_x_ub);
  __Pyx_XDECREF((PyObject *)__pyx_v_ub);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
/*
 * Buffer-protocol entry point for numpy.ndarray generated by Cython.
 * Unwraps the raw PyObject* receiver to PyArrayObject* and forwards to the
 * generated __getbuffer__ implementation; returns its int status (0 on
 * success, -1 on error, per the PEP 3118 getbuffer convention).
 */
static int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)");
  __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "numpy.pxd":193
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
char *__pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__");
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "numpy.pxd":199
* # of flags
*
* if info == NULL: return # <<<<<<<<<<<<<<
*
* cdef int copy_shape, i, ndim
*/
__pyx_t_1 = (__pyx_v_info == NULL);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
goto __pyx_L3;
}
__pyx_L3:;
/* "numpy.pxd":202
*
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "numpy.pxd":203
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "numpy.pxd":205
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "numpy.pxd":207
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
__pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t)));
if (__pyx_t_1) {
/* "numpy.pxd":208
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* copy_shape = 1 # <<<<<<<<<<<<<<
* else:
* copy_shape = 0
*/
__pyx_v_copy_shape = 1;
goto __pyx_L4;
}
/*else*/ {
/* "numpy.pxd":210
* copy_shape = 1
* else:
* copy_shape = 0 # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
__pyx_v_copy_shape = 0;
}
__pyx_L4:;
/* "numpy.pxd":212
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS);
if (__pyx_t_1) {
/* "numpy.pxd":213
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS));
__pyx_t_3 = __pyx_t_2;
} else {
__pyx_t_3 = __pyx_t_1;
}
if (__pyx_t_3) {
/* "numpy.pxd":214
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
/* "numpy.pxd":216
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS);
if (__pyx_t_3) {
/* "numpy.pxd":217
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS));
__pyx_t_2 = __pyx_t_1;
} else {
__pyx_t_2 = __pyx_t_3;
}
if (__pyx_t_2) {
/* "numpy.pxd":218
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
/* "numpy.pxd":220
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if copy_shape:
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "numpy.pxd":221
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if copy_shape:
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "numpy.pxd":222
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
if (__pyx_v_copy_shape) {
/* "numpy.pxd":225
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
/* "numpy.pxd":226
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "numpy.pxd":227
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_5 = __pyx_v_ndim;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "numpy.pxd":228
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "numpy.pxd":229
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
goto __pyx_L7;
}
/*else*/ {
/* "numpy.pxd":231
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "numpy.pxd":232
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L7:;
/* "numpy.pxd":233
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "numpy.pxd":234
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "numpy.pxd":235
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self));
/* "numpy.pxd":238
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef list stack
*/
__pyx_v_f = NULL;
/* "numpy.pxd":239
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef list stack
* cdef int offset
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self->descr));
__pyx_v_descr = __pyx_v_self->descr;
/* "numpy.pxd":243
* cdef int offset
*
* cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
*
* if not hasfields and not copy_shape:
*/
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "numpy.pxd":245
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
__pyx_t_2 = (!__pyx_v_hasfields);
if (__pyx_t_2) {
__pyx_t_3 = (!__pyx_v_copy_shape);
__pyx_t_1 = __pyx_t_3;
} else {
__pyx_t_1 = __pyx_t_2;
}
if (__pyx_t_1) {
/* "numpy.pxd":247
* if not hasfields and not copy_shape:
* # do not call releasebuffer
* info.obj = None # <<<<<<<<<<<<<<
* else:
* # need to call releasebuffer
*/
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
goto __pyx_L10;
}
/*else*/ {
/* "numpy.pxd":250
* else:
* # need to call releasebuffer
* info.obj = self # <<<<<<<<<<<<<<
*
* if not hasfields:
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
}
__pyx_L10:;
/* "numpy.pxd":252
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == '>' and little_endian) or
*/
__pyx_t_1 = (!__pyx_v_hasfields);
if (__pyx_t_1) {
/* "numpy.pxd":253
*
* if not hasfields:
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == '>' and little_endian) or
* (descr.byteorder == '<' and not little_endian)):
*/
__pyx_v_t = __pyx_v_descr->type_num;
/* "numpy.pxd":254
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_1 = (__pyx_v_descr->byteorder == '>');
if (__pyx_t_1) {
__pyx_t_2 = __pyx_v_little_endian;
} else {
__pyx_t_2 = __pyx_t_1;
}
if (!__pyx_t_2) {
/* "numpy.pxd":255
* t = descr.type_num
* if ((descr.byteorder == '>' and little_endian) or
* (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_1 = (__pyx_v_descr->byteorder == '<');
if (__pyx_t_1) {
__pyx_t_3 = (!__pyx_v_little_endian);
__pyx_t_7 = __pyx_t_3;
} else {
__pyx_t_7 = __pyx_t_1;
}
__pyx_t_1 = __pyx_t_7;
} else {
__pyx_t_1 = __pyx_t_2;
}
if (__pyx_t_1) {
/* "numpy.pxd":256
* if ((descr.byteorder == '>' and little_endian) or
* (descr.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L12;
}
__pyx_L12:;
/* "numpy.pxd":257
* (descr.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
__pyx_t_1 = (__pyx_v_t == NPY_BYTE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__b;
goto __pyx_L13;
}
/* "numpy.pxd":258
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
__pyx_t_1 = (__pyx_v_t == NPY_UBYTE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__B;
goto __pyx_L13;
}
/* "numpy.pxd":259
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
__pyx_t_1 = (__pyx_v_t == NPY_SHORT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__h;
goto __pyx_L13;
}
/* "numpy.pxd":260
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
__pyx_t_1 = (__pyx_v_t == NPY_USHORT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__H;
goto __pyx_L13;
}
/* "numpy.pxd":261
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
__pyx_t_1 = (__pyx_v_t == NPY_INT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__i;
goto __pyx_L13;
}
/* "numpy.pxd":262
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
__pyx_t_1 = (__pyx_v_t == NPY_UINT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__I;
goto __pyx_L13;
}
/* "numpy.pxd":263
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
__pyx_t_1 = (__pyx_v_t == NPY_LONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__l;
goto __pyx_L13;
}
/* "numpy.pxd":264
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
__pyx_t_1 = (__pyx_v_t == NPY_ULONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__L;
goto __pyx_L13;
}
/* "numpy.pxd":265
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
__pyx_t_1 = (__pyx_v_t == NPY_LONGLONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__q;
goto __pyx_L13;
}
/* "numpy.pxd":266
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
__pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Q;
goto __pyx_L13;
}
/* "numpy.pxd":267
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
__pyx_t_1 = (__pyx_v_t == NPY_FLOAT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__f;
goto __pyx_L13;
}
/* "numpy.pxd":268
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
__pyx_t_1 = (__pyx_v_t == NPY_DOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__d;
goto __pyx_L13;
}
/* "numpy.pxd":269
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
__pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__g;
goto __pyx_L13;
}
/* "numpy.pxd":270
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
__pyx_t_1 = (__pyx_v_t == NPY_CFLOAT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Zf;
goto __pyx_L13;
}
/* "numpy.pxd":271
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
__pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Zd;
goto __pyx_L13;
}
/* "numpy.pxd":272
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
__pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Zg;
goto __pyx_L13;
}
/* "numpy.pxd":273
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_1 = (__pyx_v_t == NPY_OBJECT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__O;
goto __pyx_L13;
}
/*else*/ {
/* "numpy.pxd":275
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_11), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_8));
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8));
__Pyx_GIVEREF(((PyObject *)__pyx_t_8));
__pyx_t_8 = 0;
__pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L13:;
/* "numpy.pxd":276
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "numpy.pxd":277
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
goto __pyx_L11;
}
/*else*/ {
/* "numpy.pxd":279
* return
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = '^' # Native data types, manual alignment
* offset = 0
*/
__pyx_v_info->format = ((char *)malloc(255));
/* "numpy.pxd":280
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "numpy.pxd":281
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = '^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "numpy.pxd":284
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
* &offset) # <<<<<<<<<<<<<<
* f[0] = 0 # Terminate format string
*
*/
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_9;
/* "numpy.pxd":285
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = 0 # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = 0;
}
__pyx_L11:;
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
/* Thin CPython slot wrapper: downcasts the generic object pointer and
 * forwards to the typed implementation. Generated by Cython; the RefNanny
 * calls are no-ops unless reference-count debugging is compiled in. */
static void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
  PyArrayObject *arr = (PyArrayObject *)__pyx_v_self;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)");
  __pyx_pf_5numpy_7ndarray_2__releasebuffer__(arr, __pyx_v_info);
  __Pyx_RefNannyFinishContext();
}
/* "numpy.pxd":287
* f[0] = 0 # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* Implementation of ndarray.__releasebuffer__ (numpy.pxd:287).
 *
 * Releases the per-export heap allocations made by __getbuffer__:
 *  - info->format was malloc'd only for structured dtypes
 *    (the PyArray_HASFIELDS path);
 *  - info->strides (with info->shape in the same block) was copied only
 *    when npy_intp and Py_ssize_t differ in size on this platform.
 */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__releasebuffer__");
  /* "numpy.pxd":288-289 — free the format string for structured dtypes. */
  if (PyArray_HASFIELDS(__pyx_v_self)) {
    free(__pyx_v_info->format);
  }
  /* "numpy.pxd":290-291 — free the strides/shape copy when the integer
   * widths differ (info->shape points into the same allocation). */
  if (sizeof(npy_intp) != sizeof(Py_ssize_t)) {
    free(__pyx_v_info->strides);
  }
  __Pyx_RefNannyFinishContext();
}
/* "numpy.pxd":767
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* Inline helper for numpy.pxd:767 — returns a broadcast (multi-)iterator
 * over one array via numpy's C API. Returns a new reference, or NULL with
 * a Python exception set on failure. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
  PyObject *ret = NULL;   /* result handed to the caller (owned) */
  PyObject *obj = NULL;   /* temporary owned reference */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew1");
  /* "numpy.pxd":768 — forward to PyArray_MultiIterNew. */
  obj = PyArray_MultiIterNew(1, ((void *)__pyx_v_a));
  if (unlikely(!obj)) {
    __pyx_filename = __pyx_f[2]; __pyx_lineno = 768; __pyx_clineno = __LINE__;
    goto bad;
  }
  __Pyx_GOTREF(obj);
  ret = obj;              /* ownership transfers to the caller */
  goto done;
bad:
  __Pyx_XDECREF(obj);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  ret = 0;
done:
  __Pyx_XGIVEREF(ret);
  __Pyx_RefNannyFinishContext();
  return ret;
}
/* "numpy.pxd":770
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* Inline helper for numpy.pxd:770 — broadcast iterator over two arrays.
 * Returns a new reference, or NULL with a Python exception set. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
  PyObject *ret = NULL;   /* result handed to the caller (owned) */
  PyObject *obj = NULL;   /* temporary owned reference */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew2");
  /* "numpy.pxd":771 — forward to PyArray_MultiIterNew. */
  obj = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b));
  if (unlikely(!obj)) {
    __pyx_filename = __pyx_f[2]; __pyx_lineno = 771; __pyx_clineno = __LINE__;
    goto bad;
  }
  __Pyx_GOTREF(obj);
  ret = obj;              /* ownership transfers to the caller */
  goto done;
bad:
  __Pyx_XDECREF(obj);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  ret = 0;
done:
  __Pyx_XGIVEREF(ret);
  __Pyx_RefNannyFinishContext();
  return ret;
}
/* "numpy.pxd":773
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* Inline helper for numpy.pxd:773 — broadcast iterator over three arrays.
 * Returns a new reference, or NULL with a Python exception set. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
  PyObject *ret = NULL;   /* result handed to the caller (owned) */
  PyObject *obj = NULL;   /* temporary owned reference */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew3");
  /* "numpy.pxd":774 — forward to PyArray_MultiIterNew. */
  obj = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c));
  if (unlikely(!obj)) {
    __pyx_filename = __pyx_f[2]; __pyx_lineno = 774; __pyx_clineno = __LINE__;
    goto bad;
  }
  __Pyx_GOTREF(obj);
  ret = obj;              /* ownership transfers to the caller */
  goto done;
bad:
  __Pyx_XDECREF(obj);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  ret = 0;
done:
  __Pyx_XGIVEREF(ret);
  __Pyx_RefNannyFinishContext();
  return ret;
}
/* "numpy.pxd":776
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* Inline helper for numpy.pxd:776 — broadcast iterator over four arrays.
 * Returns a new reference, or NULL with a Python exception set. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
  PyObject *ret = NULL;   /* result handed to the caller (owned) */
  PyObject *obj = NULL;   /* temporary owned reference */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew4");
  /* "numpy.pxd":777 — forward to PyArray_MultiIterNew. */
  obj = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d));
  if (unlikely(!obj)) {
    __pyx_filename = __pyx_f[2]; __pyx_lineno = 777; __pyx_clineno = __LINE__;
    goto bad;
  }
  __Pyx_GOTREF(obj);
  ret = obj;              /* ownership transfers to the caller */
  goto done;
bad:
  __Pyx_XDECREF(obj);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
  ret = 0;
done:
  __Pyx_XGIVEREF(ret);
  __Pyx_RefNannyFinishContext();
  return ret;
}
/* "numpy.pxd":779
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* Inline helper for numpy.pxd:779 — broadcast iterator over five arrays.
 * Returns a new reference, or NULL with a Python exception set. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
  PyObject *ret = NULL;   /* result handed to the caller (owned) */
  PyObject *obj = NULL;   /* temporary owned reference */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew5");
  /* "numpy.pxd":780 — forward to PyArray_MultiIterNew. */
  obj = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e));
  if (unlikely(!obj)) {
    __pyx_filename = __pyx_f[2]; __pyx_lineno = 780; __pyx_clineno = __LINE__;
    goto bad;
  }
  __Pyx_GOTREF(obj);
  ret = obj;              /* ownership transfers to the caller */
  goto done;
bad:
  __Pyx_XDECREF(obj);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
  ret = 0;
done:
  __Pyx_XGIVEREF(ret);
  __Pyx_RefNannyFinishContext();
  return ret;
}
/* "numpy.pxd":782
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
long __pyx_t_10;
char *__pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_util_dtypestring");
/* "numpy.pxd":789
* cdef int delta_offset
* cdef tuple i
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "numpy.pxd":790
* cdef tuple i
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "numpy.pxd":793
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 793; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++;
__Pyx_XDECREF(__pyx_v_childname);
__pyx_v_childname = __pyx_t_3;
__pyx_t_3 = 0;
/* "numpy.pxd":794
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
__pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF(((PyObject *)__pyx_v_fields));
__pyx_v_fields = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "numpy.pxd":795
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - (new_offset - offset[0]) < 15:
*/
if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) {
PyObject* sequence = ((PyObject *)__pyx_v_fields);
if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) {
if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2);
else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence));
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
} else {
__Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF(((PyObject *)__pyx_v_child));
__pyx_v_child = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_v_new_offset);
__pyx_v_new_offset = __pyx_t_4;
__pyx_t_4 = 0;
/* "numpy.pxd":797
* child, new_offset = fields
*
* if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
/* "numpy.pxd":798
*
* if (end - f) - (new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == '>' and little_endian) or
*/
__pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_13), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
/* "numpy.pxd":800
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_6 = (__pyx_v_child->byteorder == '>');
if (__pyx_t_6) {
__pyx_t_7 = __pyx_v_little_endian;
} else {
__pyx_t_7 = __pyx_t_6;
}
if (!__pyx_t_7) {
/* "numpy.pxd":801
*
* if ((child.byteorder == '>' and little_endian) or
* (child.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_6 = (__pyx_v_child->byteorder == '<');
if (__pyx_t_6) {
__pyx_t_8 = (!__pyx_v_little_endian);
__pyx_t_9 = __pyx_t_8;
} else {
__pyx_t_9 = __pyx_t_6;
}
__pyx_t_6 = __pyx_t_9;
} else {
__pyx_t_6 = __pyx_t_7;
}
if (__pyx_t_6) {
/* "numpy.pxd":802
* if ((child.byteorder == '>' and little_endian) or
* (child.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
/* "numpy.pxd":812
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!__pyx_t_6) break;
/* "numpy.pxd":813
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 120;
/* "numpy.pxd":814
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "numpy.pxd":815
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_10 = 0;
(__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1);
}
/* "numpy.pxd":817
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_10 = 0;
(__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize);
/* "numpy.pxd":819
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child));
if (__pyx_t_6) {
/* "numpy.pxd":820
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 820; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_v_t);
__pyx_v_t = __pyx_t_3;
__pyx_t_3 = 0;
/* "numpy.pxd":821
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5);
if (__pyx_t_6) {
/* "numpy.pxd":822
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_16), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L10;
}
__pyx_L10:;
/* "numpy.pxd":825
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L11;
}
/* "numpy.pxd":826
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L11;
}
/* "numpy.pxd":827
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 104;
goto __pyx_L11;
}
/* "numpy.pxd":828
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L11;
}
/* "numpy.pxd":829
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 105;
goto __pyx_L11;
}
/* "numpy.pxd":830
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L11;
}
/* "numpy.pxd":831
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 108;
goto __pyx_L11;
}
/* "numpy.pxd":832
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L11;
}
/* "numpy.pxd":833
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 113;
goto __pyx_L11;
}
/* "numpy.pxd":834
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L11;
}
/* "numpy.pxd":835
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 102;
goto __pyx_L11;
}
/* "numpy.pxd":836
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 100;
goto __pyx_L11;
}
/* "numpy.pxd":837
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 103;
goto __pyx_L11;
}
/* "numpy.pxd":838
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 102;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L11;
}
/* "numpy.pxd":839
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 100;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L11;
}
/* "numpy.pxd":840
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 103;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L11;
}
/* "numpy.pxd":841
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L11;
}
/*else*/ {
/* "numpy.pxd":843
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
__pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_11), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_5));
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5));
__Pyx_GIVEREF(((PyObject *)__pyx_t_5));
__pyx_t_5 = 0;
__pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L11:;
/* "numpy.pxd":844
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L9;
}
/*else*/ {
/* "numpy.pxd":848
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
__pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_11;
}
__pyx_L9:;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "numpy.pxd":849
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":964
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* "numpy.pxd":964 — cdef inline void set_array_base(ndarray arr, object base)
 *
 * Install `base` as the object that owns `arr`'s data buffer.
 * Passing Py_None clears the base.  A strong reference to the new base
 * is taken *before* the old one is released, so the swap stays safe
 * even when the old and new base are the same object.
 */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
  PyObject *__pyx_v_baseptr;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("set_array_base");
  if (__pyx_v_base == Py_None) {
    /* No base: the array is treated as self-owning. */
    __pyx_v_baseptr = NULL;
  } else {
    /* INCREF first — important to do this before the decref below. */
    Py_INCREF(__pyx_v_base);
    __pyx_v_baseptr = ((PyObject *)__pyx_v_base);
  }
  /* Release the previous base (may be NULL) and record the new one. */
  Py_XDECREF(__pyx_v_arr->base);
  __pyx_v_arr->base = __pyx_v_baseptr;
  __Pyx_RefNannyFinishContext();
}
/* "numpy.pxd":974
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* "numpy.pxd":974 — cdef inline object get_array_base(ndarray arr)
 *
 * Return the object recorded as the owner of `arr`'s data buffer,
 * or None when no base is set.  A new strong reference is returned
 * in either case.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("get_array_base");
  if (__pyx_v_arr->base == NULL) {
    /* No base recorded: hand back None. */
    __Pyx_INCREF(Py_None);
    __pyx_r = Py_None;
  } else {
    /* Return the stored base with a fresh reference. */
    __Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
    __pyx_r = ((PyObject *)__pyx_v_arr->base);
  }
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Module method table: the single Python-visible entry point of the
 * "wfpt" extension module.  full_pdf takes positional and keyword
 * arguments (METH_VARARGS|METH_KEYWORDS). */
static PyMethodDef __pyx_methods[] = {
  {__Pyx_NAMESTR("full_pdf"), (PyCFunction)__pyx_pw_4wfpt_1full_pdf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4wfpt_full_pdf)},
  {0, 0, 0, 0} /* sentinel — terminates the table for PyModule init */
};
#if PY_MAJOR_VERSION >= 3
/* Python 3 module definition for "wfpt"; Python 2 uses
 * Py_InitModule-style initialization instead, so this struct is
 * compiled only for Python >= 3. */
static struct PyModuleDef __pyx_moduledef = {
  PyModuleDef_HEAD_INIT,
  __Pyx_NAMESTR("wfpt"),
  0, /* m_doc */
  -1, /* m_size */
  __pyx_methods /* m_methods */,
  NULL, /* m_reload */
  NULL, /* m_traverse */
  NULL, /* m_clear */
  NULL /* m_free */
};
#endif
/* Interned-string table consumed at module init.  Each entry is
 * {&slot, chars, byte length, encoding, is_unicode, is_str, intern}:
 * presumably __pyx_kp_* are constant (key) strings and __pyx_n_* are
 * interned identifier names, per Cython's naming scheme — the trailing
 * flag columns follow __Pyx_StringTabEntry's layout (generated code;
 * verify against the matching Cython runtime if editing by hand). */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0},
  {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0},
  {&__pyx_kp_u_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 1, 0, 0},
  {&__pyx_kp_u_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 1, 0, 0},
  {&__pyx_n_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 1},
  {&__pyx_n_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1},
  {&__pyx_kp_s_21, __pyx_k_21, sizeof(__pyx_k_21), 0, 0, 1, 0},
  {&__pyx_n_s_30, __pyx_k_30, sizeof(__pyx_k_30), 0, 0, 1, 1},
  {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0},
  {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0},
  {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0},
  {&__pyx_n_s__N, __pyx_k__N, sizeof(__pyx_k__N), 0, 0, 1, 1},
  {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1},
  {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1},
  {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
  {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
  {&__pyx_n_s__a, __pyx_k__a, sizeof(__pyx_k__a), 0, 0, 1, 1},
  {&__pyx_n_s__arange, __pyx_k__arange, sizeof(__pyx_k__arange), 0, 0, 1, 1},
  {&__pyx_n_s__array, __pyx_k__array, sizeof(__pyx_k__array), 0, 0, 1, 1},
  {&__pyx_n_s__cdf_array, __pyx_k__cdf_array, sizeof(__pyx_k__cdf_array), 0, 0, 1, 1},
  {&__pyx_n_s__cdf_lb, __pyx_k__cdf_lb, sizeof(__pyx_k__cdf_lb), 0, 0, 1, 1},
  {&__pyx_n_s__cdf_ub, __pyx_k__cdf_ub, sizeof(__pyx_k__cdf_ub), 0, 0, 1, 1},
  {&__pyx_n_s__concatenate, __pyx_k__concatenate, sizeof(__pyx_k__concatenate), 0, 0, 1, 1},
  {&__pyx_n_s__cont_x, __pyx_k__cont_x, sizeof(__pyx_k__cont_x), 0, 0, 1, 1},
  {&__pyx_n_s__copy, __pyx_k__copy, sizeof(__pyx_k__copy), 0, 0, 1, 1},
  {&__pyx_n_s__cumsum, __pyx_k__cumsum, sizeof(__pyx_k__cumsum), 0, 0, 1, 1},
  {&__pyx_n_s__cumtrapz, __pyx_k__cumtrapz, sizeof(__pyx_k__cumtrapz), 0, 0, 1, 1},
  {&__pyx_n_s__data, __pyx_k__data, sizeof(__pyx_k__data), 0, 0, 1, 1},
  {&__pyx_n_s__delay, __pyx_k__delay, sizeof(__pyx_k__delay), 0, 0, 1, 1},
  {&__pyx_n_s__diff, __pyx_k__diff, sizeof(__pyx_k__diff), 0, 0, 1, 1},
  {&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1},
  {&__pyx_n_s__dt, __pyx_k__dt, sizeof(__pyx_k__dt), 0, 0, 1, 1},
  {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1},
  {&__pyx_n_s__empty, __pyx_k__empty, sizeof(__pyx_k__empty), 0, 0, 1, 1},
  {&__pyx_n_s__err, __pyx_k__err, sizeof(__pyx_k__err), 0, 0, 1, 1},
  {&__pyx_n_s__f, __pyx_k__f, sizeof(__pyx_k__f), 0, 0, 1, 1},
  {&__pyx_n_s__gen_cdf_using_pdf, __pyx_k__gen_cdf_using_pdf, sizeof(__pyx_k__gen_cdf_using_pdf), 0, 0, 1, 1},
  {&__pyx_n_s__gen_rts_from_cdf, __pyx_k__gen_rts_from_cdf, sizeof(__pyx_k__gen_rts_from_cdf), 0, 0, 1, 1},
  {&__pyx_n_s__hddm, __pyx_k__hddm, sizeof(__pyx_k__hddm), 0, 0, 1, 1},
  {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1},
  {&__pyx_n_s__idx, __pyx_k__idx, sizeof(__pyx_k__idx), 0, 0, 1, 1},
  {&__pyx_n_s__inf, __pyx_k__inf, sizeof(__pyx_k__inf), 0, 0, 1, 1},
  {&__pyx_n_s__integrate, __pyx_k__integrate, sizeof(__pyx_k__integrate), 0, 0, 1, 1},
  {&__pyx_n_s__j, __pyx_k__j, sizeof(__pyx_k__j), 0, 0, 1, 1},
  {&__pyx_n_s__l_cdf, __pyx_k__l_cdf, sizeof(__pyx_k__l_cdf), 0, 0, 1, 1},
  {&__pyx_n_s__lb, __pyx_k__lb, sizeof(__pyx_k__lb), 0, 0, 1, 1},
  {&__pyx_n_s__linspace, __pyx_k__linspace, sizeof(__pyx_k__linspace), 0, 0, 1, 1},
  {&__pyx_n_s__log, __pyx_k__log, sizeof(__pyx_k__log), 0, 0, 1, 1},
  {&__pyx_n_s__logp, __pyx_k__logp, sizeof(__pyx_k__logp), 0, 0, 1, 1},
  {&__pyx_n_s__multi, __pyx_k__multi, sizeof(__pyx_k__multi), 0, 0, 1, 1},
  {&__pyx_n_s__n_cont, __pyx_k__n_cont, sizeof(__pyx_k__n_cont), 0, 0, 1, 1},
  {&__pyx_n_s__n_st, __pyx_k__n_st, sizeof(__pyx_k__n_st), 0, 0, 1, 1},
  {&__pyx_n_s__n_sz, __pyx_k__n_sz, sizeof(__pyx_k__n_sz), 0, 0, 1, 1},
  {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1},
  {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1},
  {&__pyx_n_s__p, __pyx_k__p, sizeof(__pyx_k__p), 0, 0, 1, 1},
  {&__pyx_n_s__p_outlier, __pyx_k__p_outlier, sizeof(__pyx_k__p_outlier), 0, 0, 1, 1},
  {&__pyx_n_s__param, __pyx_k__param, sizeof(__pyx_k__param), 0, 0, 1, 1},
  {&__pyx_n_s__params, __pyx_k__params, sizeof(__pyx_k__params), 0, 0, 1, 1},
  {&__pyx_n_s__params_iter, __pyx_k__params_iter, sizeof(__pyx_k__params_iter), 0, 0, 1, 1},
  {&__pyx_n_s__pdf, __pyx_k__pdf, sizeof(__pyx_k__pdf), 0, 0, 1, 1},
  {&__pyx_n_s__pdf_array, __pyx_k__pdf_array, sizeof(__pyx_k__pdf_array), 0, 0, 1, 1},
  {&__pyx_n_s__pos_cont, __pyx_k__pos_cont, sizeof(__pyx_k__pos_cont), 0, 0, 1, 1},
  {&__pyx_n_s__rand, __pyx_k__rand, sizeof(__pyx_k__rand), 0, 0, 1, 1},
  {&__pyx_n_s__random, __pyx_k__random, sizeof(__pyx_k__random), 0, 0, 1, 1},
  {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1},
  {&__pyx_n_s__rt, __pyx_k__rt, sizeof(__pyx_k__rt), 0, 0, 1, 1},
  {&__pyx_n_s__rts, __pyx_k__rts, sizeof(__pyx_k__rts), 0, 0, 1, 1},
  {&__pyx_n_s__samples, __pyx_k__samples, sizeof(__pyx_k__samples), 0, 0, 1, 1},
  {&__pyx_n_s__searchsorted, __pyx_k__searchsorted, sizeof(__pyx_k__searchsorted), 0, 0, 1, 1},
  {&__pyx_n_s__sign, __pyx_k__sign, sizeof(__pyx_k__sign), 0, 0, 1, 1},
  {&__pyx_n_s__simps_err, __pyx_k__simps_err, sizeof(__pyx_k__simps_err), 0, 0, 1, 1},
  {&__pyx_n_s__size, __pyx_k__size, sizeof(__pyx_k__size), 0, 0, 1, 1},
  {&__pyx_n_s__split_cdf, __pyx_k__split_cdf, sizeof(__pyx_k__split_cdf), 0, 0, 1, 1},
  {&__pyx_n_s__st, __pyx_k__st, sizeof(__pyx_k__st), 0, 0, 1, 1},
  {&__pyx_n_s__sum, __pyx_k__sum, sizeof(__pyx_k__sum), 0, 0, 1, 1},
  {&__pyx_n_s__sum_logp, __pyx_k__sum_logp, sizeof(__pyx_k__sum_logp), 0, 0, 1, 1},
  {&__pyx_n_s__sv, __pyx_k__sv, sizeof(__pyx_k__sv), 0, 0, 1, 1},
  {&__pyx_n_s__sz, __pyx_k__sz, sizeof(__pyx_k__sz), 0, 0, 1, 1},
  {&__pyx_n_s__t, __pyx_k__t, sizeof(__pyx_k__t), 0, 0, 1, 1},
  {&__pyx_n_s__t_max, __pyx_k__t_max, sizeof(__pyx_k__t_max), 0, 0, 1, 1},
  {&__pyx_n_s__t_min, __pyx_k__t_min, sizeof(__pyx_k__t_min), 0, 0, 1, 1},
  {&__pyx_n_s__time, __pyx_k__time, sizeof(__pyx_k__time), 0, 0, 1, 1},
  {&__pyx_n_s__ub, __pyx_k__ub, sizeof(__pyx_k__ub), 0, 0, 1, 1},
  {&__pyx_n_s__use_adaptive, __pyx_k__use_adaptive, sizeof(__pyx_k__use_adaptive), 0, 0, 1, 1},
  {&__pyx_n_s__v, __pyx_k__v, sizeof(__pyx_k__v), 0, 0, 1, 1},
  {&__pyx_n_s__w_outlier, __pyx_k__w_outlier, sizeof(__pyx_k__w_outlier), 0, 0, 1, 1},
  {&__pyx_n_s__wfpt, __pyx_k__wfpt, sizeof(__pyx_k__wfpt), 0, 0, 1, 1},
  {&__pyx_n_s__wiener_like, __pyx_k__wiener_like, sizeof(__pyx_k__wiener_like), 0, 0, 1, 1},
  {&__pyx_n_s__wiener_like_multi, __pyx_k__wiener_like_multi, sizeof(__pyx_k__wiener_like_multi), 0, 0, 1, 1},
  {&__pyx_n_s__wp_outlier, __pyx_k__wp_outlier, sizeof(__pyx_k__wp_outlier), 0, 0, 1, 1},
  {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1},
  {&__pyx_n_s__x_lb, __pyx_k__x_lb, sizeof(__pyx_k__x_lb), 0, 0, 1, 1},
  {&__pyx_n_s__x_ub, __pyx_k__x_ub, sizeof(__pyx_k__x_ub), 0, 0, 1, 1},
  {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1},
  {&__pyx_n_s__z, __pyx_k__z, sizeof(__pyx_k__z), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0} /* sentinel */
};
/* Look up and cache the builtins this module raises or calls
 * (ValueError, range, RuntimeError) so runtime code avoids repeated
 * dict lookups.  Returns 0 on success, -1 with the error location
 * recorded in __pyx_filename/__pyx_lineno/__pyx_clineno on failure. */
static int __Pyx_InitCachedBuiltins(void) {
  __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError);
  if (!__pyx_builtin_ValueError) {
    __pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__;
    return -1;
  }
  __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range);
  if (!__pyx_builtin_range) {
    __pyx_filename = __pyx_f[2]; __pyx_lineno = 227; __pyx_clineno = __LINE__;
    return -1;
  }
  __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError);
  if (!__pyx_builtin_RuntimeError) {
    __pyx_filename = __pyx_f[2]; __pyx_lineno = 798; __pyx_clineno = __LINE__;
    return -1;
  }
  return 0;
}
/*
 * Build the module's cached constant objects at init time (generated by
 * Cython).  Creates: the argument tuple for each raise-site, the slice
 * objects used in wfpt.pyx, and one varnames-tuple + code object per
 * Python-visible function (used for tracebacks/introspection).
 * The GOTREF/INCREF/SET_ITEM/GIVEREF sequences are Cython's refnanny
 * bookkeeping; PyTuple_SET_ITEM steals the reference added by the
 * preceding INCREF, so the exact statement order matters.
 * Returns 0 on success, -1 after recording the failing .pyx file/line.
 */
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants");
/* "wfpt.pyx":173
 * if (sv < 0) or (a <=0 ) or (z < 0) or (z > 1) or (sz < 0) or (sz > 1) or (z+sz/2.>1) or \
 * (z-sz/2.<0) or (t-st/2.<0) or (t<0) or (st < 0) or not p_outlier_in_range(p_outlier):
 * raise ValueError("at least one of the parameters is out of the support") # <<<<<<<<<<<<<<
 *
 * cdef np.ndarray[double, ndim=1] x = np.linspace(-time, time, 2*N+1)
 */
/* 1-tuple holding the ValueError message raised at wfpt.pyx:173. */
__pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_2);
__Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2));
/* "wfpt.pyx":196
 *
 * # lower bound is reversed
 * cdef np.ndarray[double, ndim=1] x_lb = -x[:N][::-1] # <<<<<<<<<<<<<<
 * cdef np.ndarray[double, ndim=1] lb = data[:N][::-1]
 * # lower bound is cumulative in the wrong direction
 */
/* Cached [::-1] slice objects for the reversed-array expressions below. */
__pyx_k_slice_3 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_k_slice_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_slice_3);
__Pyx_GIVEREF(__pyx_k_slice_3);
/* "wfpt.pyx":197
 * # lower bound is reversed
 * cdef np.ndarray[double, ndim=1] x_lb = -x[:N][::-1]
 * cdef np.ndarray[double, ndim=1] lb = data[:N][::-1] # <<<<<<<<<<<<<<
 * # lower bound is cumulative in the wrong direction
 * lb = np.cumsum(np.concatenate([np.array([0]), -np.diff(lb)]))
 */
__pyx_k_slice_4 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_k_slice_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_slice_4);
__Pyx_GIVEREF(__pyx_k_slice_4);
/* "numpy.pxd":214
 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
 * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
 * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
 *
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 */
/* Argument tuples for the buffer-protocol error messages in numpy.pxd. */
__pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_6);
__Pyx_INCREF(((PyObject *)__pyx_kp_u_5));
PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6));
/* "numpy.pxd":218
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
 * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
 *
 * info.buf = PyArray_DATA(self)
 */
__pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_8);
__Pyx_INCREF(((PyObject *)__pyx_kp_u_7));
PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_u_7));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8));
/* "numpy.pxd":256
 * if ((descr.byteorder == '>' and little_endian) or
 * (descr.byteorder == '<' and not little_endian)):
 * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
 * if t == NPY_BYTE: f = "b"
 * elif t == NPY_UBYTE: f = "B"
 */
__pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_10);
__Pyx_INCREF(((PyObject *)__pyx_kp_u_9));
PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_9));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_9));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10));
/* "numpy.pxd":798
 *
 * if (end - f) - (new_offset - offset[0]) < 15:
 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
 *
 * if ((child.byteorder == '>' and little_endian) or
 */
__pyx_k_tuple_13 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_13)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_13);
__Pyx_INCREF(((PyObject *)__pyx_kp_u_12));
PyTuple_SET_ITEM(__pyx_k_tuple_13, 0, ((PyObject *)__pyx_kp_u_12));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_12));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_13));
/* "numpy.pxd":802
 * if ((child.byteorder == '>' and little_endian) or
 * (child.byteorder == '<' and not little_endian)):
 * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
 * # One could encode it in the format string and have Cython
 * # complain instead, BUT: < and > in format strings also imply
 */
/* Reuses the same message object (__pyx_kp_u_9) as the tuple at :256. */
__pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_14);
__Pyx_INCREF(((PyObject *)__pyx_kp_u_9));
PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_9));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_9));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14));
/* "numpy.pxd":822
 * t = child.type_num
 * if end - f < 5:
 * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
 *
 * # Until ticket #99 is fixed, use integers to avoid warnings
 */
__pyx_k_tuple_16 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_16)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_16);
__Pyx_INCREF(((PyObject *)__pyx_kp_u_15));
PyTuple_SET_ITEM(__pyx_k_tuple_16, 0, ((PyObject *)__pyx_kp_u_15));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_15));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_16));
/* "wfpt.pyx":30
 * include 'integrate.pxi'
 *
 * def pdf_array(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, # <<<<<<<<<<<<<<
 * double t, double st, double err=1e-4, bint logp=0, int n_st=2, int n_sz=2, bint use_adaptive=1,
 * double simps_err=1e-3, double p_outlier=0, double w_outlier=0):
 */
/* Varnames tuple (16 args + 3 locals = 19) for pdf_array's code object. */
__pyx_k_tuple_19 = PyTuple_New(19); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_19);
__Pyx_INCREF(((PyObject *)__pyx_n_s__x));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_n_s__x));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__x));
__Pyx_INCREF(((PyObject *)__pyx_n_s__v));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 1, ((PyObject *)__pyx_n_s__v));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__v));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sv));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 2, ((PyObject *)__pyx_n_s__sv));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sv));
__Pyx_INCREF(((PyObject *)__pyx_n_s__a));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 3, ((PyObject *)__pyx_n_s__a));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__a));
__Pyx_INCREF(((PyObject *)__pyx_n_s__z));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 4, ((PyObject *)__pyx_n_s__z));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__z));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sz));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 5, ((PyObject *)__pyx_n_s__sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__t));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 6, ((PyObject *)__pyx_n_s__t));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__t));
__Pyx_INCREF(((PyObject *)__pyx_n_s__st));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 7, ((PyObject *)__pyx_n_s__st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__err));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 8, ((PyObject *)__pyx_n_s__err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__logp));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 9, ((PyObject *)__pyx_n_s__logp));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__logp));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_st));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 10, ((PyObject *)__pyx_n_s__n_st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_sz));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 11, ((PyObject *)__pyx_n_s__n_sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__use_adaptive));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 12, ((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_INCREF(((PyObject *)__pyx_n_s__simps_err));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 13, ((PyObject *)__pyx_n_s__simps_err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__simps_err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__p_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 14, ((PyObject *)__pyx_n_s__p_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__p_outlier));
__Pyx_INCREF(((PyObject *)__pyx_n_s__w_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 15, ((PyObject *)__pyx_n_s__w_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__w_outlier));
__Pyx_INCREF(((PyObject *)__pyx_n_s__size));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 16, ((PyObject *)__pyx_n_s__size));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__size));
__Pyx_INCREF(((PyObject *)__pyx_n_s__i));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 17, ((PyObject *)__pyx_n_s__i));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__i));
__Pyx_INCREF(((PyObject *)__pyx_n_s__y));
PyTuple_SET_ITEM(__pyx_k_tuple_19, 18, ((PyObject *)__pyx_n_s__y));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__y));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19));
__pyx_k_codeobj_20 = (PyObject*)__Pyx_PyCode_New(16, 0, 19, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_21, __pyx_n_s__pdf_array, 30, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "wfpt.pyx":49
 * cdef inline bint p_outlier_in_range(double p_outlier): return (p_outlier >= 0) & (p_outlier <= 1)
 *
 * def wiener_like(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, double t, # <<<<<<<<<<<<<<
 * double st, double err, int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-8,
 * double p_outlier=0, double w_outlier=0):
 */
/* Varnames tuple (15 args + 5 locals = 20) for wiener_like's code object. */
__pyx_k_tuple_22 = PyTuple_New(20); if (unlikely(!__pyx_k_tuple_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_22);
__Pyx_INCREF(((PyObject *)__pyx_n_s__x));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 0, ((PyObject *)__pyx_n_s__x));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__x));
__Pyx_INCREF(((PyObject *)__pyx_n_s__v));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 1, ((PyObject *)__pyx_n_s__v));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__v));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sv));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 2, ((PyObject *)__pyx_n_s__sv));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sv));
__Pyx_INCREF(((PyObject *)__pyx_n_s__a));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 3, ((PyObject *)__pyx_n_s__a));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__a));
__Pyx_INCREF(((PyObject *)__pyx_n_s__z));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 4, ((PyObject *)__pyx_n_s__z));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__z));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sz));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 5, ((PyObject *)__pyx_n_s__sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__t));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 6, ((PyObject *)__pyx_n_s__t));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__t));
__Pyx_INCREF(((PyObject *)__pyx_n_s__st));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 7, ((PyObject *)__pyx_n_s__st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__err));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 8, ((PyObject *)__pyx_n_s__err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_st));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 9, ((PyObject *)__pyx_n_s__n_st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_sz));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 10, ((PyObject *)__pyx_n_s__n_sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__use_adaptive));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 11, ((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_INCREF(((PyObject *)__pyx_n_s__simps_err));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 12, ((PyObject *)__pyx_n_s__simps_err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__simps_err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__p_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 13, ((PyObject *)__pyx_n_s__p_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__p_outlier));
__Pyx_INCREF(((PyObject *)__pyx_n_s__w_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 14, ((PyObject *)__pyx_n_s__w_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__w_outlier));
__Pyx_INCREF(((PyObject *)__pyx_n_s__size));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 15, ((PyObject *)__pyx_n_s__size));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__size));
__Pyx_INCREF(((PyObject *)__pyx_n_s__i));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 16, ((PyObject *)__pyx_n_s__i));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__i));
__Pyx_INCREF(((PyObject *)__pyx_n_s__p));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 17, ((PyObject *)__pyx_n_s__p));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__p));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sum_logp));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 18, ((PyObject *)__pyx_n_s__sum_logp));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sum_logp));
__Pyx_INCREF(((PyObject *)__pyx_n_s__wp_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_22, 19, ((PyObject *)__pyx_n_s__wp_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__wp_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_22));
__pyx_k_codeobj_23 = (PyObject*)__Pyx_PyCode_New(15, 0, 20, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_22, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_21, __pyx_n_s__wiener_like, 49, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "wfpt.pyx":74
 *
 *
 * def wiener_like_multi(np.ndarray[double, ndim=1] x, v, sv, a, z, sz, t, st, double err, multi=None, # <<<<<<<<<<<<<<
 * int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-3,
 * double p_outlier=0, double w_outlier=0):
 */
/* Varnames tuple (16 args + 8 locals = 24) for wiener_like_multi. */
__pyx_k_tuple_24 = PyTuple_New(24); if (unlikely(!__pyx_k_tuple_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_24);
__Pyx_INCREF(((PyObject *)__pyx_n_s__x));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 0, ((PyObject *)__pyx_n_s__x));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__x));
__Pyx_INCREF(((PyObject *)__pyx_n_s__v));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 1, ((PyObject *)__pyx_n_s__v));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__v));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sv));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 2, ((PyObject *)__pyx_n_s__sv));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sv));
__Pyx_INCREF(((PyObject *)__pyx_n_s__a));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 3, ((PyObject *)__pyx_n_s__a));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__a));
__Pyx_INCREF(((PyObject *)__pyx_n_s__z));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 4, ((PyObject *)__pyx_n_s__z));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__z));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sz));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 5, ((PyObject *)__pyx_n_s__sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__t));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 6, ((PyObject *)__pyx_n_s__t));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__t));
__Pyx_INCREF(((PyObject *)__pyx_n_s__st));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 7, ((PyObject *)__pyx_n_s__st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__err));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 8, ((PyObject *)__pyx_n_s__err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__multi));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 9, ((PyObject *)__pyx_n_s__multi));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_st));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 10, ((PyObject *)__pyx_n_s__n_st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_sz));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 11, ((PyObject *)__pyx_n_s__n_sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__use_adaptive));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 12, ((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_INCREF(((PyObject *)__pyx_n_s__simps_err));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 13, ((PyObject *)__pyx_n_s__simps_err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__simps_err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__p_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 14, ((PyObject *)__pyx_n_s__p_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__p_outlier));
__Pyx_INCREF(((PyObject *)__pyx_n_s__w_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 15, ((PyObject *)__pyx_n_s__w_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__w_outlier));
__Pyx_INCREF(((PyObject *)__pyx_n_s__size));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 16, ((PyObject *)__pyx_n_s__size));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__size));
__Pyx_INCREF(((PyObject *)__pyx_n_s__i));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 17, ((PyObject *)__pyx_n_s__i));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__i));
__Pyx_INCREF(((PyObject *)__pyx_n_s__p));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 18, ((PyObject *)__pyx_n_s__p));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__p));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sum_logp));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 19, ((PyObject *)__pyx_n_s__sum_logp));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sum_logp));
__Pyx_INCREF(((PyObject *)__pyx_n_s__wp_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 20, ((PyObject *)__pyx_n_s__wp_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__wp_outlier));
__Pyx_INCREF(((PyObject *)__pyx_n_s__params));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 21, ((PyObject *)__pyx_n_s__params));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__params));
__Pyx_INCREF(((PyObject *)__pyx_n_s__params_iter));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 22, ((PyObject *)__pyx_n_s__params_iter));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__params_iter));
__Pyx_INCREF(((PyObject *)__pyx_n_s__param));
PyTuple_SET_ITEM(__pyx_k_tuple_24, 23, ((PyObject *)__pyx_n_s__param));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__param));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_24));
__pyx_k_codeobj_25 = (PyObject*)__Pyx_PyCode_New(16, 0, 24, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_24, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_21, __pyx_n_s__wiener_like_multi, 74, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "wfpt.pyx":101
 * return sum_logp
 *
 * def gen_rts_from_cdf(double v, double sv, double a, double z, double sz, double t, \ # <<<<<<<<<<<<<<
 * double st, int samples=1000, double cdf_lb=-6, double cdf_ub=6, double dt=1e-2):
 *
 */
/* Varnames tuple (11 args + 11 locals = 22) for gen_rts_from_cdf. */
__pyx_k_tuple_26 = PyTuple_New(22); if (unlikely(!__pyx_k_tuple_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_26);
__Pyx_INCREF(((PyObject *)__pyx_n_s__v));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 0, ((PyObject *)__pyx_n_s__v));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__v));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sv));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 1, ((PyObject *)__pyx_n_s__sv));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sv));
__Pyx_INCREF(((PyObject *)__pyx_n_s__a));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 2, ((PyObject *)__pyx_n_s__a));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__a));
__Pyx_INCREF(((PyObject *)__pyx_n_s__z));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 3, ((PyObject *)__pyx_n_s__z));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__z));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sz));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 4, ((PyObject *)__pyx_n_s__sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__t));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 5, ((PyObject *)__pyx_n_s__t));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__t));
__Pyx_INCREF(((PyObject *)__pyx_n_s__st));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 6, ((PyObject *)__pyx_n_s__st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__samples));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 7, ((PyObject *)__pyx_n_s__samples));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__samples));
__Pyx_INCREF(((PyObject *)__pyx_n_s__cdf_lb));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 8, ((PyObject *)__pyx_n_s__cdf_lb));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__cdf_lb));
__Pyx_INCREF(((PyObject *)__pyx_n_s__cdf_ub));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 9, ((PyObject *)__pyx_n_s__cdf_ub));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__cdf_ub));
__Pyx_INCREF(((PyObject *)__pyx_n_s__dt));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 10, ((PyObject *)__pyx_n_s__dt));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__dt));
__Pyx_INCREF(((PyObject *)__pyx_n_s__x));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 11, ((PyObject *)__pyx_n_s__x));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__x));
__Pyx_INCREF(((PyObject *)__pyx_n_s__l_cdf));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 12, ((PyObject *)__pyx_n_s__l_cdf));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__l_cdf));
__Pyx_INCREF(((PyObject *)__pyx_n_s__pdf));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 13, ((PyObject *)__pyx_n_s__pdf));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__pdf));
__Pyx_INCREF(((PyObject *)__pyx_n_s__rt));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 14, ((PyObject *)__pyx_n_s__rt));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__rt));
__Pyx_INCREF(((PyObject *)__pyx_n_s__size));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 15, ((PyObject *)__pyx_n_s__size));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__size));
__Pyx_INCREF(((PyObject *)__pyx_n_s__i));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 16, ((PyObject *)__pyx_n_s__i));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__i));
__Pyx_INCREF(((PyObject *)__pyx_n_s__j));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 17, ((PyObject *)__pyx_n_s__j));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__j));
__Pyx_INCREF(((PyObject *)__pyx_n_s__idx));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 18, ((PyObject *)__pyx_n_s__idx));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx));
__Pyx_INCREF(((PyObject *)__pyx_n_s__rts));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 19, ((PyObject *)__pyx_n_s__rts));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__rts));
__Pyx_INCREF(((PyObject *)__pyx_n_s__f));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 20, ((PyObject *)__pyx_n_s__f));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__f));
__Pyx_INCREF(((PyObject *)__pyx_n_s__delay));
PyTuple_SET_ITEM(__pyx_k_tuple_26, 21, ((PyObject *)__pyx_n_s__delay));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__delay));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_26));
__pyx_k_codeobj_27 = (PyObject*)__Pyx_PyCode_New(11, 0, 22, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_21, __pyx_n_s__gen_rts_from_cdf, 101, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "wfpt.pyx":134
 * return rts
 *
 * def wiener_like_contaminant(np.ndarray[double, ndim=1] x, np.ndarray[int, ndim=1] cont_x, double v, \ # <<<<<<<<<<<<<<
 * double sv, double a, double z, double sz, double t, double st, double t_min, \
 * double t_max, double err, int n_st= 10, int n_sz=10, bint use_adaptive=1, \
 */
/* Varnames tuple (16 args + 6 locals = 22) for wiener_like_contaminant. */
__pyx_k_tuple_28 = PyTuple_New(22); if (unlikely(!__pyx_k_tuple_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_28);
__Pyx_INCREF(((PyObject *)__pyx_n_s__x));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 0, ((PyObject *)__pyx_n_s__x));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__x));
__Pyx_INCREF(((PyObject *)__pyx_n_s__cont_x));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 1, ((PyObject *)__pyx_n_s__cont_x));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__cont_x));
__Pyx_INCREF(((PyObject *)__pyx_n_s__v));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 2, ((PyObject *)__pyx_n_s__v));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__v));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sv));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 3, ((PyObject *)__pyx_n_s__sv));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sv));
__Pyx_INCREF(((PyObject *)__pyx_n_s__a));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 4, ((PyObject *)__pyx_n_s__a));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__a));
__Pyx_INCREF(((PyObject *)__pyx_n_s__z));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 5, ((PyObject *)__pyx_n_s__z));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__z));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sz));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 6, ((PyObject *)__pyx_n_s__sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__t));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 7, ((PyObject *)__pyx_n_s__t));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__t));
__Pyx_INCREF(((PyObject *)__pyx_n_s__st));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 8, ((PyObject *)__pyx_n_s__st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__t_min));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 9, ((PyObject *)__pyx_n_s__t_min));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__t_min));
__Pyx_INCREF(((PyObject *)__pyx_n_s__t_max));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 10, ((PyObject *)__pyx_n_s__t_max));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__t_max));
__Pyx_INCREF(((PyObject *)__pyx_n_s__err));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 11, ((PyObject *)__pyx_n_s__err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_st));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 12, ((PyObject *)__pyx_n_s__n_st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_sz));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 13, ((PyObject *)__pyx_n_s__n_sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__use_adaptive));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 14, ((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_INCREF(((PyObject *)__pyx_n_s__simps_err));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 15, ((PyObject *)__pyx_n_s__simps_err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__simps_err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__size));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 16, ((PyObject *)__pyx_n_s__size));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__size));
__Pyx_INCREF(((PyObject *)__pyx_n_s__i));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 17, ((PyObject *)__pyx_n_s__i));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__i));
__Pyx_INCREF(((PyObject *)__pyx_n_s__p));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 18, ((PyObject *)__pyx_n_s__p));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__p));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sum_logp));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 19, ((PyObject *)__pyx_n_s__sum_logp));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sum_logp));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_cont));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 20, ((PyObject *)__pyx_n_s__n_cont));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_cont));
__Pyx_INCREF(((PyObject *)__pyx_n_s__pos_cont));
PyTuple_SET_ITEM(__pyx_k_tuple_28, 21, ((PyObject *)__pyx_n_s__pos_cont));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__pos_cont));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_28));
__pyx_k_codeobj_29 = (PyObject*)__Pyx_PyCode_New(16, 0, 22, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_21, __pyx_n_s_30, 134, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "wfpt.pyx":165
 * return sum_logp
 *
 * def gen_cdf_using_pdf(double v, double sv, double a, double z, double sz, double t, double st, double err, # <<<<<<<<<<<<<<
 * int N=500, double time=5., int n_st=2, int n_sz=2, bint use_adaptive=1, double simps_err=1e-3,
 * double p_outlier=0, double w_outlier=0):
 */
/* Varnames tuple (16 args + 3 locals = 19) for gen_cdf_using_pdf. */
__pyx_k_tuple_31 = PyTuple_New(19); if (unlikely(!__pyx_k_tuple_31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_31);
__Pyx_INCREF(((PyObject *)__pyx_n_s__v));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 0, ((PyObject *)__pyx_n_s__v));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__v));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sv));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 1, ((PyObject *)__pyx_n_s__sv));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sv));
__Pyx_INCREF(((PyObject *)__pyx_n_s__a));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 2, ((PyObject *)__pyx_n_s__a));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__a));
__Pyx_INCREF(((PyObject *)__pyx_n_s__z));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 3, ((PyObject *)__pyx_n_s__z));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__z));
__Pyx_INCREF(((PyObject *)__pyx_n_s__sz));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 4, ((PyObject *)__pyx_n_s__sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__t));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 5, ((PyObject *)__pyx_n_s__t));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__t));
__Pyx_INCREF(((PyObject *)__pyx_n_s__st));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 6, ((PyObject *)__pyx_n_s__st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__err));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 7, ((PyObject *)__pyx_n_s__err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__N));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 8, ((PyObject *)__pyx_n_s__N));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__N));
__Pyx_INCREF(((PyObject *)__pyx_n_s__time));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 9, ((PyObject *)__pyx_n_s__time));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__time));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_st));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 10, ((PyObject *)__pyx_n_s__n_st));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_st));
__Pyx_INCREF(((PyObject *)__pyx_n_s__n_sz));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 11, ((PyObject *)__pyx_n_s__n_sz));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__n_sz));
__Pyx_INCREF(((PyObject *)__pyx_n_s__use_adaptive));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 12, ((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__use_adaptive));
__Pyx_INCREF(((PyObject *)__pyx_n_s__simps_err));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 13, ((PyObject *)__pyx_n_s__simps_err));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__simps_err));
__Pyx_INCREF(((PyObject *)__pyx_n_s__p_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 14, ((PyObject *)__pyx_n_s__p_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__p_outlier));
__Pyx_INCREF(((PyObject *)__pyx_n_s__w_outlier));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 15, ((PyObject *)__pyx_n_s__w_outlier));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__w_outlier));
__Pyx_INCREF(((PyObject *)__pyx_n_s__x));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 16, ((PyObject *)__pyx_n_s__x));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__x));
__Pyx_INCREF(((PyObject *)__pyx_n_s__cdf_array));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 17, ((PyObject *)__pyx_n_s__cdf_array));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__cdf_array));
__Pyx_INCREF(((PyObject *)__pyx_n_s__idx));
PyTuple_SET_ITEM(__pyx_k_tuple_31, 18, ((PyObject *)__pyx_n_s__idx));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_31));
__pyx_k_codeobj_32 = (PyObject*)__Pyx_PyCode_New(16, 0, 19, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_21, __pyx_n_s__gen_cdf_using_pdf, 165, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_32)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "wfpt.pyx":190
 * return x, cdf_array
 *
 * def split_cdf(np.ndarray[double, ndim=1] x, np.ndarray[double, ndim=1] data): # <<<<<<<<<<<<<<
 *
 * #get length of data
 */
/* Varnames tuple (2 args + 5 locals = 7) for split_cdf. */
__pyx_k_tuple_33 = PyTuple_New(7); if (unlikely(!__pyx_k_tuple_33)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_33);
__Pyx_INCREF(((PyObject *)__pyx_n_s__x));
PyTuple_SET_ITEM(__pyx_k_tuple_33, 0, ((PyObject *)__pyx_n_s__x));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__x));
__Pyx_INCREF(((PyObject *)__pyx_n_s__data));
PyTuple_SET_ITEM(__pyx_k_tuple_33, 1, ((PyObject *)__pyx_n_s__data));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__data));
__Pyx_INCREF(((PyObject *)__pyx_n_s__N));
PyTuple_SET_ITEM(__pyx_k_tuple_33, 2, ((PyObject *)__pyx_n_s__N));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__N));
__Pyx_INCREF(((PyObject *)__pyx_n_s__x_lb));
PyTuple_SET_ITEM(__pyx_k_tuple_33, 3, ((PyObject *)__pyx_n_s__x_lb));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__x_lb));
__Pyx_INCREF(((PyObject *)__pyx_n_s__lb));
PyTuple_SET_ITEM(__pyx_k_tuple_33, 4, ((PyObject *)__pyx_n_s__lb));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__lb));
__Pyx_INCREF(((PyObject *)__pyx_n_s__x_ub));
PyTuple_SET_ITEM(__pyx_k_tuple_33, 5, ((PyObject *)__pyx_n_s__x_ub));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__x_ub));
__Pyx_INCREF(((PyObject *)__pyx_n_s__ub));
PyTuple_SET_ITEM(__pyx_k_tuple_33, 6, ((PyObject *)__pyx_n_s__ub));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__ub));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_33));
__pyx_k_codeobj_34 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_21, __pyx_n_s__split_cdf, 190, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* One-time initialization of module-level runtime constants:
 * the shared NaN bit pattern, the interned string table, and cached
 * small-integer objects (0, -1, 15) used by the generated code.
 * Returns 0 on success; -1 with a Python exception set on failure. */
static int __Pyx_InitGlobals(void) {
/* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
a quiet NaN. */
memset(&__PYX_NAN, 0xFF, sizeof(__PYX_NAN));
PyEval_InitThreads();
/* Intern every compile-time string constant declared in __pyx_string_tab. */
if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/* Pre-built integer objects for literals that appear in the module. */
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
return 0;
__pyx_L1_error:;
return -1;
}
/* Module initialization entry point for the "wfpt" extension.
 * Named initwfpt on Python 2 (returns void) and PyInit_wfpt on Python 3
 * (returns the module object or NULL). The body runs the standard
 * Cython init sequence: runtime setup, module creation, cached
 * constants/builtins, numpy type imports, then the module-level
 * statements of wfpt.pyx (imports and def-function registration).
 * The exact statement order is significant and must not be changed. */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initwfpt(void); /*proto*/
PyMODINIT_FUNC initwfpt(void)
#else
PyMODINIT_FUNC PyInit_wfpt(void); /*proto*/
PyMODINIT_FUNC PyInit_wfpt(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
/* Reference-count debugging: try the standalone "refnanny" module first,
   then the copy bundled inside Cython.Runtime; abort hard if neither loads. */
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_wfpt(void)");
if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* Shared empty tuple/bytes singletons used throughout the generated code. */
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#ifdef __Pyx_CyFunction_USED
if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4(__Pyx_NAMESTR("wfpt"), __pyx_methods, 0, 0, PYTHON_API_VERSION);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
#if PY_MAJOR_VERSION < 3
Py_INCREF(__pyx_m);
#endif
__pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME));
if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/*--- Initialize various global constants etc. ---*/
if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_module_is_main_wfpt) {
if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
}
/*--- Builtin init code ---*/
if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Constants init code ---*/
if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Global init code ---*/
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/*--- Type import code ---*/
/* Import numpy's C-level types; sizes are checked against the imported
   classes, so a binary-incompatible numpy fails here rather than crashing. */
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 860; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
/* "wfpt.pyx":15
 * # GPLv3
 *
 * import hddm             # <<<<<<<<<<<<<<
 *
 * import scipy.integrate as integrate
 */
__pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__hddm), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__hddm, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "wfpt.pyx":17
 * import hddm
 *
 * import scipy.integrate as integrate             # <<<<<<<<<<<<<<
 * from copy import copy
 * import numpy as np
 */
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)__pyx_n_s_18));
PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s_18));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s_18));
__pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s_17), ((PyObject *)__pyx_t_1), -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__integrate, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "wfpt.pyx":18
 *
 * import scipy.integrate as integrate
 * from copy import copy             # <<<<<<<<<<<<<<
 * import numpy as np
 *
 */
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_n_s__copy));
PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s__copy));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__copy));
__pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__copy), ((PyObject *)__pyx_t_2), -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__copy);
if (__pyx_t_2 == NULL) {
if (PyErr_ExceptionMatches(PyExc_AttributeError)) __Pyx_RaiseImportError(__pyx_n_s__copy);
if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__copy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "wfpt.pyx":19
 * import scipy.integrate as integrate
 * from copy import copy
 * import numpy as np             # <<<<<<<<<<<<<<
 *
 * cimport numpy as np
 */
__pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "/home/wiecki/working/projects/hddm/src/integrate.pxi":4
 * #wraparound=False cython: boundscheck=False
 *
 * import numpy as np             # <<<<<<<<<<<<<<
 * cimport numpy as np
 * cimport cython
 */
/* NOTE: included .pxi files re-execute their module-level imports here. */
__pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* Register each module-level def function as an attribute of the module. */
/* "wfpt.pyx":30
 * include 'integrate.pxi'
 *
 * def pdf_array(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz,             # <<<<<<<<<<<<<<
 *               double t, double st, double err=1e-4, bint logp=0, int n_st=2, int n_sz=2, bint use_adaptive=1,
 *               double simps_err=1e-3, double p_outlier=0, double w_outlier=0):
 */
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4wfpt_3pdf_array, NULL, __pyx_n_s__wfpt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pdf_array, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "wfpt.pyx":49
 * cdef inline bint p_outlier_in_range(double p_outlier): return (p_outlier >= 0) & (p_outlier <= 1)
 *
 * def wiener_like(np.ndarray[double, ndim=1] x, double v, double sv, double a, double z, double sz, double t,             # <<<<<<<<<<<<<<
 *                 double st, double err, int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-8,
 *                 double p_outlier=0, double w_outlier=0):
 */
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4wfpt_5wiener_like, NULL, __pyx_n_s__wfpt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__wiener_like, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "wfpt.pyx":74
 *
 *
 * def wiener_like_multi(np.ndarray[double, ndim=1] x, v, sv, a, z, sz, t, st, double err, multi=None,             # <<<<<<<<<<<<<<
 *                       int n_st=10, int n_sz=10, bint use_adaptive=1, double simps_err=1e-3,
 *                       double p_outlier=0, double w_outlier=0):
 */
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4wfpt_7wiener_like_multi, NULL, __pyx_n_s__wfpt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__wiener_like_multi, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "wfpt.pyx":101
 *     return sum_logp
 *
 * def gen_rts_from_cdf(double v, double sv, double a, double z, double sz, double t, \             # <<<<<<<<<<<<<<
 *                      double st, int samples=1000, double cdf_lb=-6, double cdf_ub=6, double dt=1e-2):
 *
 */
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4wfpt_9gen_rts_from_cdf, NULL, __pyx_n_s__wfpt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gen_rts_from_cdf, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "wfpt.pyx":134
 *     return rts
 *
 * def wiener_like_contaminant(np.ndarray[double, ndim=1] x, np.ndarray[int, ndim=1] cont_x, double v, \             # <<<<<<<<<<<<<<
 *                             double sv, double a, double z, double sz, double t, double st, double t_min, \
 *                             double t_max, double err, int n_st= 10, int n_sz=10, bint use_adaptive=1, \
 */
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4wfpt_11wiener_like_contaminant, NULL, __pyx_n_s__wfpt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_30, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "wfpt.pyx":165
 *     return sum_logp
 *
 * def gen_cdf_using_pdf(double v, double sv, double a, double z, double sz, double t, double st, double err,             # <<<<<<<<<<<<<<
 *                       int N=500, double time=5., int n_st=2, int n_sz=2, bint use_adaptive=1, double simps_err=1e-3,
 *                       double p_outlier=0, double w_outlier=0):
 */
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4wfpt_13gen_cdf_using_pdf, NULL, __pyx_n_s__wfpt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gen_cdf_using_pdf, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "wfpt.pyx":190
 *     return x, cdf_array
 *
 * def split_cdf(np.ndarray[double, ndim=1] x, np.ndarray[double, ndim=1] data):             # <<<<<<<<<<<<<<
 *
 *     #get length of data
 */
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4wfpt_15split_cdf, NULL, __pyx_n_s__wfpt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__split_cdf, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* Empty __test__ dict: no doctests are exported by this module. */
/* "wfpt.pyx":1
 * #cython: embedsignature=True             # <<<<<<<<<<<<<<
 * #cython: cdivision=True
 * #cython: wraparound=False
 */
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_1));
if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
/* "numpy.pxd":974
 *     arr.base = baseptr
 *
 * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
 *     if arr.base is NULL:
 *         return None
 */
goto __pyx_L0;
__pyx_L1_error:;
/* Error path: release temporaries, attach a traceback for "init wfpt",
   and drop the half-initialized module object. */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
__Pyx_AddTraceback("init wfpt", __pyx_clineno, __pyx_lineno, __pyx_filename);
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init wfpt");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* Runtime support code */
#if CYTHON_REFNANNY
/* Import module `modname` and fetch its C API table from the module's
 * "RefNannyAPI" attribute (a Python int holding a C pointer).
 * Returns NULL on any failure; the caller is expected to clear the
 * Python error and try an alternative module name. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
/* The attribute stores the address of the API struct as an integer. */
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif /* CYTHON_REFNANNY */
/* Look up `name` as an attribute of `dict` (typically a module), falling
 * back to the builtins module __pyx_b if not found there.
 * Returns a new reference, or NULL with NameError set if the name is
 * missing from both scopes. */
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
PyObject *result;
result = PyObject_GetAttr(dict, name);
if (!result) {
if (dict != __pyx_b) {
/* First lookup failed: clear the error and retry against builtins. */
PyErr_Clear();
result = PyObject_GetAttr(__pyx_b, name);
}
if (!result) {
PyErr_SetObject(PyExc_NameError, name);
}
}
return result;
}
/* Raise a TypeError describing a wrong number of positional arguments
 * passed to `func_name`. `exact` selects "exactly" wording; otherwise
 * "at least"/"at most" is chosen by comparing num_found with num_min. */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* Raise a TypeError for a keyword argument that was also supplied
 * positionally (or given twice). Uses %U for unicode names on Python 3
 * and the raw char buffer on Python 2. */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AS_STRING(kw_name));
#endif
}
/* Match the keyword dict `kwds` against the NULL-terminated table
 * `argnames` of interned parameter-name pointers. Matched values are
 * stored into `values` at the parameter's index; unknown keywords go
 * into `kwds2` (**kwargs) if given, otherwise raise TypeError.
 * `num_pos_args` marks how many leading entries were already filled
 * positionally, so a keyword matching one of them is a duplicate.
 * Returns 0 on success, -1 with an exception set on error. */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
/* Fast path: interned strings can be compared by pointer identity. */
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
} else {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
#else
if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
#endif
goto invalid_keyword_type;
} else {
/* Slow path: compare by string contents for non-identical objects. */
for (name = first_kw_arg; *name; name++) {
#if PY_MAJOR_VERSION >= 3
if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
PyUnicode_Compare(**name, key) == 0) break;
#else
if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
_PyString_Eq(**name, key)) break;
#endif
}
if (*name) {
values[name-argnames] = value;
} else {
/* Not a declared keyword: check whether it duplicates a
   positional parameter before treating it as **kwargs. */
for (name=argnames; name != first_kw_arg; name++) {
if (**name == key) goto arg_passed_twice;
#if PY_MAJOR_VERSION >= 3
if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
#else
if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
_PyString_Eq(**name, key)) goto arg_passed_twice;
#endif
}
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
}
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, **name);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%s() got an unexpected keyword argument '%s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* Check that argument `obj` has the expected Python type.
 * `none_allowed` accepts Py_None; `exact` requires the exact type
 * (no subclasses), otherwise an isinstance-style check is used.
 * Returns 1 if acceptable; 0 with TypeError (or SystemError when the
 * type object itself is missing) set otherwise. */
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact)
{
if (!type) {
PyErr_Format(PyExc_SystemError, "Missing type object");
return 0;
}
if (none_allowed && obj == Py_None) return 1;
else if (exact) {
if (Py_TYPE(obj) == type) return 1;
}
else {
if (PyObject_TypeCheck(obj, type)) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%s' has incorrect type (expected %s, got %s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* Runtime byte-order probe: nonzero iff the host stores the least
 * significant byte of an unsigned int first (little-endian). */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
    union {
        unsigned int value;
        unsigned char bytes[sizeof(unsigned int)];
    } probe;
    probe.value = 1U;
    return probe.bytes[0] != 0;
}
/* Reset a buffer-format parsing context before checking a PEP 3118
 * format string against the expected dtype `type`. `stack` is the
 * caller-provided array used as the field-nesting stack; entry 0 is
 * bound to the synthetic root field wrapping `type`.
 * Packing mode starts as native ('@'); all counters are cleared. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
/* Descend through any leading struct wrappers ('S' typegroup) so the
   head points at the first scalar field to be matched. */
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
/* Parse a non-negative decimal integer at *ts and advance *ts past it.
 * Returns the parsed value, or -1 (with *ts unchanged) if the first
 * character is not a digit.
 * BUG FIX: the continuation loop previously tested `*t < '9'`, which
 * excluded the digit '9' in any non-leading position, so e.g. "19"
 * parsed as 1 and left *ts pointing at the '9'. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;  /* first char is not a digit: not a number */
    } else {
        count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {  /* was `*t < '9'`: dropped trailing 9s */
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
/* Like __Pyx_BufFmt_ParseNumber, but a missing number is an error:
 * sets ValueError and returns -1 when *ts does not start with a digit. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1) /* First char was not a digit */
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
/* Set ValueError for a format-string character the parser does not
 * recognize; shared error path for the TypeCharTo* helpers below. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
/* Human-readable name for a PEP 3118 format code, used when composing
 * "Buffer dtype mismatch" error messages. The float codes ('f','d','g')
 * describe a complex pair when is_complex is nonzero. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    if (ch == 'b') return "'char'";
    if (ch == 'B') return "'unsigned char'";
    if (ch == 'h') return "'short'";
    if (ch == 'H') return "'unsigned short'";
    if (ch == 'i') return "'int'";
    if (ch == 'I') return "'unsigned int'";
    if (ch == 'l') return "'long'";
    if (ch == 'L') return "'unsigned long'";
    if (ch == 'q') return "'long long'";
    if (ch == 'Q') return "'unsigned long long'";
    if (ch == 'f') return is_complex ? "'complex float'" : "'float'";
    if (ch == 'd') return is_complex ? "'complex double'" : "'double'";
    if (ch == 'g') return is_complex ? "'complex long double'" : "'long double'";
    if (ch == 'T') return "a struct";
    if (ch == 'O') return "Python object";
    if (ch == 'P') return "a pointer";
    if (ch == 's' || ch == 'p') return "a string";
    if (ch == 0) return "end";
    return "unparseable format string";
}
/* Element size of format code `ch` under the struct-module "standard"
 * (machine-independent) layout, doubled for complex float codes.
 * Returns 0 with ValueError set for 'g' (no standard size is defined
 * for long double) or any unrecognized code. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    if (ch == '?' || ch == 'c' || ch == 'b' || ch == 'B' || ch == 's' || ch == 'p')
        return 1;
    if (ch == 'h' || ch == 'H')
        return 2;
    if (ch == 'i' || ch == 'I' || ch == 'l' || ch == 'L')
        return 4;
    if (ch == 'q' || ch == 'Q')
        return 8;
    if (ch == 'f')
        return is_complex ? 8 : 4;
    if (ch == 'd')
        return is_complex ? 16 : 8;
    if (ch == 'g') {
        PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
        return 0;
    }
    if (ch == 'O' || ch == 'P')
        return sizeof(void*);
    __Pyx_BufFmt_RaiseUnexpectedChar(ch);
    return 0;
}
/* Element size of format code `ch` using the compiler's native ('@')
 * sizes; complex float codes occupy two scalars. Returns 0 with
 * ValueError set for an unrecognized code. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    const size_t pair = is_complex ? 2 : 1;  /* complex = two scalars */
    if (ch == 'c' || ch == 'b' || ch == 'B' || ch == 's' || ch == 'p')
        return 1;
    if (ch == 'h' || ch == 'H')
        return sizeof(short);
    if (ch == 'i' || ch == 'I')
        return sizeof(int);
    if (ch == 'l' || ch == 'L')
        return sizeof(long);
#ifdef HAVE_LONG_LONG
    if (ch == 'q' || ch == 'Q')
        return sizeof(PY_LONG_LONG);
#endif
    if (ch == 'f')
        return sizeof(float) * pair;
    if (ch == 'd')
        return sizeof(double) * pair;
    if (ch == 'g')
        return sizeof(long double) * pair;
    if (ch == 'O' || ch == 'P')
        return sizeof(void*);
    __Pyx_BufFmt_RaiseUnexpectedChar(ch);
    return 0;
}
/* Alignment probes: in `struct { char c; T x; }` the padding inserted
 * before x equals T's alignment requirement, so
 * sizeof(__Pyx_st_T) - sizeof(T) yields that alignment (used by
 * __Pyx_BufFmt_TypeCharToAlignment below). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment requirement of format code `ch`, measured via the
 * __Pyx_st_* probe structs (char followed by the type). Returns 0 with
 * ValueError set for an unrecognized code. `is_complex` is accepted for
 * signature symmetry with the other TypeCharTo* helpers but unused:
 * a complex pair aligns like its scalar component. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) {
    if (ch == '?' || ch == 'c' || ch == 'b' || ch == 'B' || ch == 's' || ch == 'p')
        return 1;
    if (ch == 'h' || ch == 'H')
        return sizeof(__Pyx_st_short) - sizeof(short);
    if (ch == 'i' || ch == 'I')
        return sizeof(__Pyx_st_int) - sizeof(int);
    if (ch == 'l' || ch == 'L')
        return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    if (ch == 'q' || ch == 'Q')
        return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
    if (ch == 'f')
        return sizeof(__Pyx_st_float) - sizeof(float);
    if (ch == 'd')
        return sizeof(__Pyx_st_double) - sizeof(double);
    if (ch == 'g')
        return sizeof(__Pyx_st_longdouble) - sizeof(long double);
    if (ch == 'P' || ch == 'O')
        return sizeof(__Pyx_st_void_p) - sizeof(void*);
    __Pyx_BufFmt_RaiseUnexpectedChar(ch);
    return 0;
}
/* Padding probes: these compute the trailing padding needed at the end
 * of a struct so that an array of it stays aligned on its first member.
 * This will probably be the same as the alignment computed above, but
 * we don't have any guarantees, so it is measured independently with
 * the member order reversed (type first, then char). */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Struct padding contribution of format code `ch`, measured via the
 * __Pyx_pad_* probe structs (type followed by a char). Returns 0 with
 * ValueError set for an unrecognized code; `is_complex` is unused but
 * kept for signature symmetry with the other TypeCharTo* helpers. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, int is_complex) {
    if (ch == '?' || ch == 'c' || ch == 'b' || ch == 'B' || ch == 's' || ch == 'p')
        return 1;
    if (ch == 'h' || ch == 'H')
        return sizeof(__Pyx_pad_short) - sizeof(short);
    if (ch == 'i' || ch == 'I')
        return sizeof(__Pyx_pad_int) - sizeof(int);
    if (ch == 'l' || ch == 'L')
        return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    if (ch == 'q' || ch == 'Q')
        return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
    if (ch == 'f')
        return sizeof(__Pyx_pad_float) - sizeof(float);
    if (ch == 'd')
        return sizeof(__Pyx_pad_double) - sizeof(double);
    if (ch == 'g')
        return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
    if (ch == 'P' || ch == 'O')
        return sizeof(__Pyx_pad_void_p) - sizeof(void*);
    __Pyx_BufFmt_RaiseUnexpectedChar(ch);
    return 0;
}
/* Classify format code `ch` into a type group used for dtype matching:
 * 'I' signed integer, 'U' unsigned integer, 'R' real float,
 * 'C' complex float, 'O' Python object, 'P' pointer.
 * Returns 0 with ValueError set for an unrecognized code. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    if (ch == 'c' || ch == 'b' || ch == 'h' || ch == 'i' ||
        ch == 'l' || ch == 'q' || ch == 's' || ch == 'p')
        return 'I';
    if (ch == 'B' || ch == 'H' || ch == 'I' || ch == 'L' || ch == 'Q')
        return 'U';
    if (ch == 'f' || ch == 'd' || ch == 'g')
        return is_complex ? 'C' : 'R';
    if (ch == 'O')
        return 'O';
    if (ch == 'P')
        return 'P';
    __Pyx_BufFmt_RaiseUnexpectedChar(ch);
    return 0;
}
/* Set a ValueError describing a dtype mismatch found while checking the
 * buffer format string. Wording depends on parser position: at the root
 * (or past the end) it reports the expected top-level type; inside a
 * nested struct it names the parent struct and field. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
/* Format string continued past the expected dtype's end. */
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
/* Consume the pending run of identical format characters accumulated in
 * ctx (enc_type repeated enc_count times), matching each occurrence
 * against the next expected field of the target dtype and advancing
 * fmt_offset, including native-mode ('@') alignment padding.
 * Walks into nested structs/arrays via the ctx->head stack.
 * Returns 0 on success, -1 with ValueError set on any mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
/* Expected field is a C array: validate dimensionality first. */
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
/* "s"/"p" encode a 1-d char array; its count is the first dimension. */
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
/* is_valid_array is set by __pyx_buffmt_parse_array for "(d1,d2)" specs. */
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
/* Element size depends on packing mode: native vs standard layout. */
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
/* Native mode: round fmt_offset up to the element's alignment and
   remember the struct's overall alignment for trailing padding. */
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
/* A complex expected where scalars arrive: descend into its
   (real, imag) component fields and retry this character. */
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count; /* Consume from buffer string */
/* Advance to the next expected field, unwinding or descending the
   struct stack as needed. */
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break; /* breaks both loops as ctx->enc_count == 0 */
}
ctx->head->field = ++field;
if (field->type == NULL) {
/* End of this struct's field list: pop back to the parent. */
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
/* Next field is itself a struct: push and enter its first field. */
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue; /* empty struct */
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
/* Parse an array-dimension specification "(d1,d2,...)" in a PEP 3118
 * format string; *tsp points at the '('. Each dimension is checked
 * against the expected dtype's arraysize. On success advances *tsp past
 * the ')' , marks ctx->is_valid_array, and returns Py_None (borrowed,
 * used only as a non-NULL success flag); returns NULL with ValueError
 * set on failure.
 * BUG FIX: the original whitespace check `if (isspace(*ts)) continue;`
 * never advanced ts, so any space inside the parentheses spun forever;
 * it also passed a possibly-signed char to isspace (UB for negative
 * values). Both are fixed below. */
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ++ts;  /* skip the opening '(' */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    /* Flush any pending scalar run before switching to array parsing. */
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        if (isspace((unsigned char)*ts)) {
            ++ts;  /* was a bare `continue`: infinite loop on whitespace */
            continue;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                        "Expected a dimension of size %zu, got %d",
                        ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;  /* step past the closing ')' */
    return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
/* Validate the PEP-3118 format string ts against the C type layout
   recorded in ctx. Returns a pointer just past the consumed portion of
   ts on success, or NULL with a Python exception set on mismatch. */
int got_Z = 0;  /* set while a 'Z' (complex) prefix is pending */
while (1) {
switch(*ts) {
case 0:
/* End of string: every expected field must have been consumed. */
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case 10:
case 13:
/* Skip whitespace (space, LF, CR). */
++ts;
break;
case '<':
/* Little-endian marker: only valid on a little-endian host. */
if (!__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
/* Big-endian / network order: only valid on a big-endian host. */
if (__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
/* Packing / alignment mode markers are recorded as-is. */
ctx->new_packmode = *ts++;
break;
case 'T': /* substruct */
{
/* Nested struct "T{...}": recurse once per repeat count, replaying
   the same substring each time. */
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0; /* Erase processed last struct element */
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}': /* end of substruct; either repeat or move on */
{
/* Flush the last element and pad fmt_offset up to the struct's
   alignment before returning to the caller's nesting level. */
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0; /* Erase processed last struct element */
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
/* Padding bytes: advance the offset; there is no C field to match. */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
/* Complex prefix: must be followed by 'f', 'd' or 'g'. */
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
} /* fall through */
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 's': case 'p':
/* Scalar type code: merge into the pending chunk when the type,
   complexness and packmode agree; otherwise flush and start anew. */
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
} else {
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
}
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
/* ":name:" field annotations are skipped entirely. */
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
/* Array specification, handled by the dedicated parser. */
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
/* Anything else must be a repeat count for the next type code. */
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
/* Reset a Py_buffer to the canonical "empty" state used for None
 * buffers: no data pointer, no owner, and the module's shared all-zero /
 * all-minus-one shape/stride/suboffset arrays. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
    buf->obj = NULL;
    buf->buf = NULL;
    buf->suboffsets = __Pyx_minusones;
    buf->shape = __Pyx_zeros;
    buf->strides = __Pyx_zeros;
}
/* Acquire a Py_buffer from obj and validate it against dtype: the
   dimension count must equal nd, the format string must match the dtype
   (unless cast != 0), and the item size must equal dtype->size.
   obj == None/NULL yields a zeroed buffer and succeeds.
   Returns 0 on success, -1 with an exception set (and buf zeroed) on
   failure. */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
if (obj == Py_None || obj == NULL) {
__Pyx_ZeroBuffer(buf);
return 0;
}
buf->buf = NULL;
if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
if (buf->ndim != nd) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
/* Full PEP-3118 format-string check against the expected dtype. */
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned)buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
/* Exporters may leave suboffsets NULL; normalize to the sentinel so
   downstream code can test against __Pyx_minusones uniformly. */
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_ZeroBuffer(buf);
return -1;
}
/* Release a buffer previously filled by __Pyx_GetBufferAndValidate.
 * A zeroed buffer (buf == NULL) is a no-op; the shared __Pyx_minusones
 * sentinel is cleared first so the real release never sees it. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
    if (info->buf != NULL) {
        if (info->suboffsets == __Pyx_minusones)
            info->suboffsets = NULL;
        __Pyx_ReleaseBuffer(info);
    }
}
/* Check that obj is an instance of type. Returns 1 on success; returns 0
 * with TypeError (or SystemError for a missing type object) set on
 * failure. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_Format(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (unlikely(!PyObject_TypeCheck(obj, type))) {
        PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                     Py_TYPE(obj)->tp_name, type->tp_name);
        return 0;
    }
    return 1;
}
/* Report the (unrecoverable) situation where a buffer re-assignment
 * failed and re-acquiring the previously held buffer failed too. */
static void __Pyx_RaiseBufferFallbackError(void) {
    /* PyErr_SetString instead of PyErr_Format: the message is a fixed
       string with no format directives, so no formatting pass is needed
       and a stray '%' could never be misinterpreted. */
    PyErr_SetString(PyExc_ValueError,
        "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
}
/* Install (type, value, tb) as the thread's current exception, stealing
   the references. The previous exception is DECREF'ed only after the new
   one is fully in place, because those DECREFs can run arbitrary
   destructor code that might inspect the exception state. */
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyThreadState *tstate = PyThreadState_GET();
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
/* Move the thread state's current exception triple into *type, *value
 * and *tb, clearing it from the thread state. Ownership of the three
 * references is transferred to the caller. */
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
    PyThreadState *ts = PyThreadState_GET();
    *type = ts->curexc_type;
    ts->curexc_type = NULL;
    *value = ts->curexc_value;
    ts->curexc_value = NULL;
    *tb = ts->curexc_traceback;
    ts->curexc_traceback = NULL;
}
/* Raise the TypeError used when subscripting a None value. */
static CYTHON_INLINE void __Pyx_RaiseNoneIndexingError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is unsubscriptable");
}
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of the 'raise' statement semantics for
   raise type[, value[, tb]]. 'cause' is ignored (no exception chaining
   on Python 2). Acquires its own references and hands them to
   __Pyx_ErrRestore, which steals them. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
/* cause is unused */
Py_XINCREF(type);
Py_XINCREF(value);
Py_XINCREF(tb);
/* First, check the traceback argument, replacing None with NULL. */
if (tb == Py_None) {
Py_DECREF(tb);
tb = 0;
}
else if (tb != NULL && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
/* Next, replace a missing value with None */
if (value == NULL) {
value = Py_None;
Py_INCREF(value);
}
#if PY_VERSION_HEX < 0x02050000
if (!PyClass_Check(type))
#else
if (!PyType_Check(type))
#endif
{
/* Raising an instance. The value should be a dummy. */
if (value != Py_None) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
/* Normalize to raise <class>, <instance> */
Py_DECREF(value);
value = type;
#if PY_VERSION_HEX < 0x02050000
if (PyInstance_Check(type)) {
type = (PyObject*) ((PyInstanceObject*)type)->in_class;
Py_INCREF(type);
}
else {
type = 0;
PyErr_SetString(PyExc_TypeError,
"raise: exception must be an old-style class or instance");
goto raise_error;
}
#else
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
#endif
}
/* __Pyx_ErrRestore steals the references acquired above. */
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else /* Python 3+ */
/* Python 3 implementation: additionally supports 'raise ... from cause'
   exception chaining. Works with borrowed references (no INCREF of the
   arguments on entry). */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
/* raise <instance>: the type becomes the instance's class. */
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (!PyExceptionClass_Check(type)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (PyExceptionClass_Check(cause)) {
/* An exception class given as cause is instantiated first. */
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
}
else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
}
else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
if (!value) {
/* NOTE(review): if this instantiation fails, value stays NULL and is
   passed to PyException_SetCause below — looks like a latent crash
   path; confirm against later upstream Cython revisions. */
value = PyObject_CallObject(type, NULL);
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
/* Splice the supplied traceback into the freshly set exception. */
PyThreadState *tstate = PyThreadState_GET();
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
}
bad:
return;
}
#endif
/* Error helpers for tuple/sequence unpacking: too few values... */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %"PY_FORMAT_SIZE_T"d value%s to unpack",
index, (index == 1) ? "" : "s");
}
/* ...too many values... */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected);
}
/* ...and unpacking from None. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* Dispatch the appropriate unpacking error for tuple t at expected
   length 'index': unpacking None, too few values, or too many. */
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
if (t == Py_None) {
__Pyx_RaiseNoneNotIterableError();
} else if (PyTuple_GET_SIZE(t) < index) {
__Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t));
} else {
__Pyx_RaiseTooManyValuesError(index);
}
}
#if PY_MAJOR_VERSION < 3
/* Python 2 fallback for obtaining a Py_buffer: use the buffer protocol
   when available (>= 2.6), otherwise fall back to the numpy ndarray
   __getbuffer__ slot. Returns 0 on success, -1 with TypeError set. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
#if PY_VERSION_HEX >= 0x02060000
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
#endif
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
else {
PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
}
/* Matching release: PyBuffer_Release for buffer-protocol objects,
   otherwise the ndarray __releasebuffer__ slot followed by dropping the
   owner reference. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject* obj = view->obj;
if (obj) {
#if PY_VERSION_HEX >= 0x02060000
if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;}
#endif
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view);
Py_DECREF(obj);
view->obj = NULL;
}
}
#endif
/* Implement 'import name' / 'from name import from_list' at relative
   import 'level' by calling the builtin __import__. Returns a new module
   reference, or NULL with an exception set. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
PyObject *py_import = 0;
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
py_import = __Pyx_GetAttrString(__pyx_b, "__import__");
if (!py_import)
goto bad;
if (from_list)
list = from_list;
else {
/* __import__ requires a (possibly empty) fromlist sequence. */
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);  /* borrowed reference */
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
#if PY_VERSION_HEX >= 0x02050000
{
/* Python >= 2.5: pass the relative-import level explicitly. */
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
}
#else
if (level>0) {
PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
goto bad;
}
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, NULL);
#endif
bad:
/* Shared cleanup; module is NULL on every error path. */
Py_XDECREF(empty_list);
Py_XDECREF(py_import);
Py_XDECREF(empty_dict);
return module;
}
/* Raise ImportError for a name missing during a from-import. */
static CYTHON_INLINE void __Pyx_RaiseImportError(PyObject *name) {
#if PY_MAJOR_VERSION < 3
PyErr_Format(PyExc_ImportError, "cannot import name %.230s",
PyString_AsString(name));
#else
PyErr_Format(PyExc_ImportError, "cannot import name %S", name);
#endif
}
/* Box a Py_intptr_t as a Python int/long, choosing the narrowest CPython
   constructor that can represent the platform's pointer-sized integer. */
static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) {
const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0;
const int is_unsigned = const_zero < neg_one;  /* true iff the typedef is unsigned */
if ((sizeof(Py_intptr_t) == sizeof(char)) ||
(sizeof(Py_intptr_t) == sizeof(short))) {
return PyInt_FromLong((long)val);
} else if ((sizeof(Py_intptr_t) == sizeof(int)) ||
(sizeof(Py_intptr_t) == sizeof(long))) {
if (is_unsigned)
return PyLong_FromUnsignedLong((unsigned long)val);
else
return PyInt_FromLong((long)val);
} else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) {
if (is_unsigned)
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val);
else
return PyLong_FromLongLong((PY_LONG_LONG)val);
} else {
/* Unusual width: fall back to byte-array conversion in native order. */
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t),
little, !is_unsigned);
}
}
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
/* C++ with native complex: construct std::complex<float>. */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
/* C99 _Complex: combine the real part with the imaginary unit. */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
/* No native complex support: fill the fallback struct. */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
#if CYTHON_CCOMPLEX
#else
/* Fallback float-complex arithmetic, used only when the compiler has no
   native complex type. All formulas are the textbook definitions. */
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
/* Division; no guard against a zero divisor (denom == 0 yields inf/NaN). */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
/* |z| for float complex; hypotf() is robust against intermediate
 * overflow/underflow where available. */
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
    return sqrtf(z.real*z.real + z.imag*z.imag);
#else
    return hypotf(z.real, z.imag);
#endif
}
/* a ** b for float complex. Small integer exponents (|n| <= 4) are
 * handled by repeated multiplication; the general case goes through the
 * polar form r * e^(i*theta). */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex z;
    float r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            /* a**(-n) == (1/a)**n : invert a, negate the exponent. */
            float denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                /* FIX: the original stored __Pyx_c_prodf(a, a) into z and
                   then recomputed the same product for the return value. */
                return __Pyx_c_prodf(a, a);
            case 3:
                z = __Pyx_c_prodf(a, a);
                return __Pyx_c_prodf(z, a);
            case 4:
                z = __Pyx_c_prodf(a, a);
                return __Pyx_c_prodf(z, z);
        }
    }
    if (a.imag == 0) {
        if (a.real == 0) {
            return a;
        }
        /* NOTE(review): for negative real a with a non-integer exponent
           this takes logf of a negative value (NaN) — matches original. */
        r = a.real;
        theta = 0;
    } else {
        r = __Pyx_c_absf(a);
        theta = atan2f(a.imag, a.real);
    }
    lnr = logf(r);
    z_r = expf(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cosf(z_theta);
    z.imag = z_r * sinf(z_theta);
    return z;
}
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
/* C++ with native complex: construct std::complex<double>. */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
/* C99 _Complex: combine the real part with the imaginary unit. */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
/* No native complex support: fill the fallback struct. */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
#if CYTHON_CCOMPLEX
#else
/* Fallback double-complex arithmetic, used only when the compiler has no
   native complex type. All formulas are the textbook definitions. */
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
/* Division; no guard against a zero divisor (denom == 0 yields inf/NaN). */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
/* |z| for double complex; hypot() is robust against intermediate
 * overflow/underflow where available. */
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
    return sqrt(z.real*z.real + z.imag*z.imag);
#else
    return hypot(z.real, z.imag);
#endif
}
/* a ** b for double complex. Small integer exponents (|n| <= 4) are
 * handled by repeated multiplication; the general case goes through the
 * polar form r * e^(i*theta). */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex z;
    double r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            /* a**(-n) == (1/a)**n : invert a, negate the exponent. */
            double denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                /* FIX: the original stored __Pyx_c_prod(a, a) into z and
                   then recomputed the same product for the return value. */
                return __Pyx_c_prod(a, a);
            case 3:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, a);
            case 4:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, z);
        }
    }
    if (a.imag == 0) {
        if (a.real == 0) {
            return a;
        }
        /* NOTE(review): for negative real a with a non-integer exponent
           this takes log of a negative value (NaN) — matches original. */
        r = a.real;
        theta = 0;
    } else {
        r = __Pyx_c_abs(a);
        theta = atan2(a.imag, a.real);
    }
    lnr = log(r);
    z_r = exp(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cos(z_theta);
    z.imag = z_r * sin(z_theta);
    return z;
}
#endif
#endif
/* Convert a Python number to 'unsigned char'. Values that do not fit
   raise OverflowError and return (unsigned char)-1 (ambiguous with the
   valid value 255; callers must check PyErr_Occurred()). */
static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) {
const unsigned char neg_one = (unsigned char)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(unsigned char) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(unsigned char)val)) {
/* Only raise OverflowError when the long conversion itself did not
   already set an error (val == -1 with an exception pending). */
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to unsigned char" :
"value too large to convert to unsigned char");
}
return (unsigned char)-1;
}
return (unsigned char)val;
}
return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x);
}
/* Same pattern for 'unsigned short'. */
static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) {
const unsigned short neg_one = (unsigned short)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(unsigned short) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(unsigned short)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to unsigned short" :
"value too large to convert to unsigned short");
}
return (unsigned short)-1;
}
return (unsigned short)val;
}
return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x);
}
/* Same pattern for 'unsigned int'. */
static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) {
const unsigned int neg_one = (unsigned int)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(unsigned int) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(unsigned int)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to unsigned int" :
"value too large to convert to unsigned int");
}
return (unsigned int)-1;
}
return (unsigned int)val;
}
return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x);
}
/* Same pattern for plain 'char' (signedness is implementation-defined;
   the is_unsigned probe adapts the error message accordingly). */
static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) {
const char neg_one = (char)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(char) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(char)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to char" :
"value too large to convert to char");
}
return (char)-1;
}
return (char)val;
}
return (char)__Pyx_PyInt_AsLong(x);
}
/* Convert a Python number to 'short'; out-of-range values raise
   OverflowError and return (short)-1 (check PyErr_Occurred() to
   disambiguate from a valid -1). */
static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) {
const short neg_one = (short)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(short) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(short)val)) {
/* Only raise if the long conversion did not already set an error. */
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to short" :
"value too large to convert to short");
}
return (short)-1;
}
return (short)val;
}
return (short)__Pyx_PyInt_AsLong(x);
}
/* Same pattern for 'int'. */
static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) {
const int neg_one = (int)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(int) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(int)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to int" :
"value too large to convert to int");
}
return (int)-1;
}
return (int)val;
}
return (int)__Pyx_PyInt_AsLong(x);
}
/* Same pattern for 'signed char'. */
static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) {
const signed char neg_one = (signed char)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(signed char) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(signed char)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to signed char" :
"value too large to convert to signed char");
}
return (signed char)-1;
}
return (signed char)val;
}
return (signed char)__Pyx_PyInt_AsSignedLong(x);
}
/* Same pattern for 'signed short'. */
static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) {
const signed short neg_one = (signed short)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(signed short) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(signed short)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to signed short" :
"value too large to convert to signed short");
}
return (signed short)-1;
}
return (signed short)val;
}
return (signed short)__Pyx_PyInt_AsSignedLong(x);
}
/* Same pattern for 'signed int'. */
static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) {
const signed int neg_one = (signed int)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(signed int) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(signed int)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to signed int" :
"value too large to convert to signed int");
}
return (signed int)-1;
}
return (signed int)val;
}
return (signed int)__Pyx_PyInt_AsSignedLong(x);
}
/* NOTE(review): despite its name, this helper converts to plain 'int'
   (return type and all casts are int) — presumably a code-generator
   artifact; confirm against the Cython version that produced this file.
   The signature is left untouched since callers depend on it. */
static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) {
const int neg_one = (int)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(int) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(int)val)) {
/* Only raise if the long conversion did not already set an error. */
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to int" :
"value too large to convert to int");
}
return (int)-1;
}
return (int)val;
}
return (int)__Pyx_PyInt_AsLong(x);
}
/* Convert a Python int/long (or a number coercible via __int__) to
   'unsigned long'. Negative values raise OverflowError and return
   (unsigned long)-1 (check PyErr_Occurred() to disambiguate). */
static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned long");
return (unsigned long)-1;
}
return (unsigned long)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
/* Py_SIZE < 0 marks a negative PyLong without converting it. */
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned long");
return (unsigned long)-1;
}
return (unsigned long)PyLong_AsUnsignedLong(x);
} else {
return (unsigned long)PyLong_AsLong(x);
}
} else {
/* Other number types: coerce to an integer object and retry once. */
unsigned long val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (unsigned long)-1;
val = __Pyx_PyInt_AsUnsignedLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Same pattern for 'unsigned PY_LONG_LONG'. */
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned PY_LONG_LONG");
return (unsigned PY_LONG_LONG)-1;
}
return (unsigned PY_LONG_LONG)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned PY_LONG_LONG");
return (unsigned PY_LONG_LONG)-1;
}
return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
} else {
return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x);
}
} else {
unsigned PY_LONG_LONG val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (unsigned PY_LONG_LONG)-1;
val = __Pyx_PyInt_AsUnsignedLongLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Convert a Python int/long (or a number coercible via __int__) to
   'long'. Errors return (long)-1 with an exception set. Note:
   is_unsigned is always 0 here since 'long' is signed; the unsigned
   branches are dead but kept by the generator's shared template. */
static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
const long neg_one = (long)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long)-1;
}
return (long)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long)-1;
}
return (long)PyLong_AsUnsignedLong(x);
} else {
return (long)PyLong_AsLong(x);
}
} else {
/* Other number types: coerce to an integer object and retry once. */
long val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (long)-1;
val = __Pyx_PyInt_AsLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Same pattern for 'PY_LONG_LONG'. */
static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to PY_LONG_LONG");
return (PY_LONG_LONG)-1;
}
return (PY_LONG_LONG)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to PY_LONG_LONG");
return (PY_LONG_LONG)-1;
}
return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
} else {
return (PY_LONG_LONG)PyLong_AsLongLong(x);
}
} else {
PY_LONG_LONG val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (PY_LONG_LONG)-1;
val = __Pyx_PyInt_AsLongLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Convert a Python int/long (or a number coercible via __int__) to
   'signed long'. Errors return (signed long)-1 with an exception set.
   The unsigned branches are dead for a signed target but kept by the
   generator's shared template. */
static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
const signed long neg_one = (signed long)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed long");
return (signed long)-1;
}
return (signed long)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed long");
return (signed long)-1;
}
return (signed long)PyLong_AsUnsignedLong(x);
} else {
return (signed long)PyLong_AsLong(x);
}
} else {
/* Other number types: coerce to an integer object and retry once. */
signed long val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (signed long)-1;
val = __Pyx_PyInt_AsSignedLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Same pattern for 'signed PY_LONG_LONG'. */
static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed PY_LONG_LONG");
return (signed PY_LONG_LONG)-1;
}
return (signed PY_LONG_LONG)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed PY_LONG_LONG");
return (signed PY_LONG_LONG)-1;
}
return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
} else {
return (signed PY_LONG_LONG)PyLong_AsLongLong(x);
}
} else {
signed PY_LONG_LONG val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (signed PY_LONG_LONG)-1;
val = __Pyx_PyInt_AsSignedLongLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Warn (via PyErr_Warn/PyErr_WarnEx) when the Python version this module
 * was compiled against differs from the runtime version. Returns the
 * warning call's result, or 0 when the versions match.
 * FIX: the original used 4-byte buffers and "%d.%d" formatting, which
 * truncates two-digit minor versions (e.g. "3.10" became "3.1") and
 * compared only the first minor digit; compare the "major.minor" prefix
 * of Py_GetVersion() directly instead. */
static int __Pyx_check_binary_version(void) {
    char ctversion[5];
    int same = 1, i, found_dot;
    const char *rt_from_call = Py_GetVersion();
    PyOS_snprintf(ctversion, 5, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    /* Compare the compile-time "major.minor" against the runtime string. */
    for (i = 0; i < 4; i++) {
        if (!ctversion[i]) {
            /* End of compile-time string: a digit continuing the runtime
               minor version (e.g. "3.1" vs "3.10") is a mismatch. */
            same = (rt_from_call[i] < '0' || rt_from_call[i] > '9');
            break;
        }
        if (rt_from_call[i] != ctversion[i]) {
            same = 0;
            break;
        }
    }
    if (!same) {
        char rtversion[5] = {'\0'};
        char message[200];
        /* Extract the runtime "major.minor" prefix for the message. */
        found_dot = 0;
        for (i = 0; i < 4; i++) {
            if (rt_from_call[i] == '.') {
                if (found_dot) break;
                found_dot = 1;
            } else if (rt_from_call[i] < '0' || rt_from_call[i] > '9') {
                break;
            }
            rtversion[i] = rt_from_call[i];
        }
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        #if PY_VERSION_HEX < 0x02050000
        return PyErr_Warn(NULL, message);
        #else
        return PyErr_WarnEx(NULL, message, 1);
        #endif
    }
    return 0;
}
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
/* Import module_name.class_name and verify that it is a type object with
   the expected instance size. With strict == 0 a larger runtime
   tp_basicsize only warns (possible binary incompatibility); any other
   size difference is a ValueError. Returns a new reference, or NULL with
   an exception set. */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
size_t size, int strict)
{
PyObject *py_module = 0;
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
py_name = __Pyx_PyIdentifier_FromString(class_name);
if (!py_name)
goto bad;
result = PyObject_GetAttr(py_module, py_name);
Py_DECREF(py_name);
py_name = 0;
Py_DECREF(py_module);
py_module = 0;
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%s.%s is not a type object",
module_name, class_name);
goto bad;
}
if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) {
/* Larger basicsize may still be ABI-compatible: warn only. */
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility",
module_name, class_name);
#if PY_VERSION_HEX < 0x02050000
if (PyErr_Warn(NULL, warning) < 0) goto bad;
#else
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
#endif
}
else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) {
PyErr_Format(PyExc_ValueError,
"%s.%s has the wrong size, try recompiling",
module_name, class_name);
goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(py_module);
Py_XDECREF(result);
return NULL;
}
#endif
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
/* Import and return the module called 'name' (new reference), or NULL
   with an exception set. */
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = __Pyx_PyIdentifier_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
/* Release every cached code object and reset the module-level cache.
 * The cache fields are cleared *before* the Py_DECREF loop, so any code
 * triggered by deallocation sees a consistent (empty) cache. */
static void __pyx_clear_code_object_cache(void) {
    __Pyx_CodeObjectCacheEntry* cached = __pyx_code_cache.entries;
    int n = __pyx_code_cache.count;
    int k;
    if (cached == NULL)
        return;
    __pyx_code_cache.count = 0;
    __pyx_code_cache.max_count = 0;
    __pyx_code_cache.entries = NULL;
    for (k = 0; k < n; k++)
        Py_DECREF(cached[k].code_object);
    PyMem_Free(cached);
}
/* Binary search over `entries` (sorted by code_line).  Returns the index of
 * the matching entry, or the insertion position that keeps the array sorted
 * when code_line is absent.
 * Fixes vs. the original: (1) an explicit guard for count == 0 -- the
 * original fell through to read entries[0] out of bounds in that case;
 * (2) overflow-safe midpoint computation. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end < 0) {
        return 0;  /* empty table: insertion position 0 */
    }
    if (code_line > entries[end].code_line) {
        return count;  /* larger than every cached line: append */
    }
    while (start < end) {
        mid = start + (end - start) / 2;  /* avoids int overflow of start+end */
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up the cached PyCodeObject for code_line.  Returns a new reference,
 * or NULL when the line is 0 or not present in the cache. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
/* bisect returns either the matching index or an insertion point */
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
/* Insert (or replace) the cached code object for code_line, keeping the
 * entries array sorted by code_line.  Takes its own reference to
 * code_object.  Allocation failures are silently ignored: the cache is an
 * optimization only. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
/* first insertion: allocate the initial 64-entry table */
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
/* line already cached: swap in the new code object */
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
/* grow the table by 64 entries when full */
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
/* shift the tail up one slot and insert at pos */
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a minimal PyCodeObject (empty bytecode; only filename, function
 * name, and first line are meaningful).  Used solely to fabricate
 * Python-level traceback frames for C code.  Returns a new reference,
 * or NULL on failure. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int __pyx_clineno,
int __pyx_lineno, const char *__pyx_filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(__pyx_filename);
#else
py_srcfile = PyUnicode_FromString(__pyx_filename);
#endif
if (!py_srcfile) goto bad;
/* when a C line number is known, embed "func (file.c:123)" in the name */
if (__pyx_clineno) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0, /*int argcount,*/
0, /*int kwonlyargcount,*/
0, /*int nlocals,*/
0, /*int stacksize,*/
0, /*int flags,*/
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
__pyx_lineno, /*int firstlineno,*/
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
/* Append a synthetic frame (funcname / filename / line) to the current
 * Python traceback.  Code objects are cached per line via
 * __pyx_insert_code_object.  Failures while building the frame are
 * silently dropped -- the original exception must stay intact. */
static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
int __pyx_lineno, const char *__pyx_filename) {
PyCodeObject *py_code = 0;
PyObject *py_globals = 0;
PyFrameObject *py_frame = 0;
/* the C line number keys the cache when available, else the Python line */
py_code = __pyx_find_code_object(__pyx_clineno ? __pyx_clineno : __pyx_lineno);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, __pyx_clineno, __pyx_lineno, __pyx_filename);
if (!py_code) goto bad;
__pyx_insert_code_object(__pyx_clineno ? __pyx_clineno : __pyx_lineno, py_code);
}
py_globals = PyModule_GetDict(__pyx_m);  /* borrowed reference */
if (!py_globals) goto bad;
py_frame = PyFrame_New(
PyThreadState_GET(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
py_globals, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
py_frame->f_lineno = __pyx_lineno;
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
/* Materialize the module's interned string constants from the
 * NUL-terminated __Pyx_StringTabEntry table; each entry's target slot
 * *t->p receives a new reference.  Returns 0 on success, -1 on failure. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else /* Python 3+ has unicode identifiers */
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
++t;
}
return 0;
}
/* Type Conversion Functions */
/* Truth value of x.  Fast paths for the three singletons True/False/None;
 * everything else falls back to the generic protocol (may return -1 on
 * error, as PyObject_IsTrue does). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True)
        return 1;
    if (x == Py_False || x == Py_None)
        return 0;
    return PyObject_IsTrue(x);
}
/* Coerce x to a Python integer, mirroring int(x): int/long pass through
 * with a fresh reference; otherwise the type's nb_int (or nb_long on Py2)
 * slot is invoked and the result's type is validated.  Returns a new
 * reference or NULL with TypeError set. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
PyNumberMethods *m;
const char *name = NULL;
PyObject *res = NULL;
#if PY_VERSION_HEX < 0x03000000
if (PyInt_Check(x) || PyLong_Check(x))
#else
if (PyLong_Check(x))
#endif
return Py_INCREF(x), x;
m = Py_TYPE(x)->tp_as_number;
#if PY_VERSION_HEX < 0x03000000
if (m && m->nb_int) {
name = "int";
res = PyNumber_Int(x);
}
else if (m && m->nb_long) {
name = "long";
res = PyNumber_Long(x);
}
#else
if (m && m->nb_int) {
name = "int";
res = PyNumber_Long(x);
}
#endif
/* __int__/__long__ must return an actual integer type */
if (res) {
#if PY_VERSION_HEX < 0x03000000
if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
if (!PyLong_Check(res)) {
#endif
PyErr_Format(PyExc_TypeError,
"__%s__ returned non-%s (type %.200s)",
name, name, Py_TYPE(res)->tp_name);
Py_DECREF(res);
return NULL;
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
/* Convert b to Py_ssize_t via its __index__ protocol.  Returns -1 on
 * failure with the exception left set by PyNumber_Index (note -1 is also
 * a legal value; callers must check PyErr_Occurred). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
    Py_ssize_t result;
    PyObject* idx = PyNumber_Index(b);
    if (!idx)
        return -1;
    result = PyInt_AsSsize_t(idx);
    Py_DECREF(idx);
    return result;
}
/* Convert a size_t to a Python integer.  On Python < 2.5 (which lacks
 * PyInt_FromSize_t) values beyond LONG_MAX go through an endian-aware
 * byte-array conversion instead. */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
#if PY_VERSION_HEX < 0x02050000
if (ival <= LONG_MAX)
return PyInt_FromLong((long)ival);
else {
unsigned char *bytes = (unsigned char *) &ival;
int one = 1; int little = (int)*(unsigned char*)&one; /* runtime endianness probe */
return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
}
#else
return PyInt_FromSize_t(ival);
#endif
}
/* Convert a Python integer to size_t, raising OverflowError when it does
 * not fit.  Returns (size_t)-1 on error; callers must check
 * PyErr_Occurred to distinguish the error from a legitimate value. */
static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x);
if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
return (size_t)-1;
} else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to size_t");
return (size_t)-1;
}
return (size_t)val;
}
#endif /* Py_PYTHON_H */
|
pooling_2x2_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 2x2-kernel, stride-2 max pooling over bf16 data in pack-4 layout (4
// channel lanes interleaved per pixel, stored as u16).  Each bf16 value is
// widened to fp32 by a left shift of 16, max-reduced, then narrowed back
// with a right shift of 16 (truncating round).  One OpenMP task per channel.
// NOTE(review): assumes top_blob dims are the standard outw = w/2,
// outh = h/2 of this kernel family -- not validated here.
static void pooling2x2s2_max_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
// u16 elements to skip after each output row: the unconsumed remainder of
// the input row, plus one whole extra input row (vertical stride 2)
const int tailstep = (w - 2 * outw + w) * 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
unsigned short* outptr = top_blob.channel(q);
// r0/r1: the two input rows feeding the current output row
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
for (int i = 0; i < outh; i++)
{
int j = 0;
// main loop: 4 output pixels (16 input pixels) per iteration
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmax v0.4s, v0.4s, v1.4s \n"
"fmax v2.4s, v2.4s, v3.4s \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmax v4.4s, v4.4s, v5.4s \n"
"fmax v6.4s, v6.4s, v7.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmax v16.4s, v16.4s, v17.4s \n"
"fmax v18.4s, v18.4s, v19.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"fmax v20.4s, v20.4s, v21.4s \n"
"fmax v22.4s, v22.4s, v23.4s \n"
"fmax v0.4s, v0.4s, v16.4s \n"
"fmax v1.4s, v2.4s, v18.4s \n"
"fmax v2.4s, v4.4s, v20.4s \n"
"fmax v3.4s, v6.4s, v22.4s \n"
"shrn v0.4h, v0.4s, #16 \n"
"shrn v1.4h, v1.4s, #16 \n"
"shrn v2.4h, v2.4s, #16 \n"
"shrn v3.4h, v3.4s, #16 \n"
"st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0], #32 \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1) // %2
: "0"(outptr),
"1"(r0),
"2"(r1)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else // __aarch64__
// same dataflow as above, expressed with armv7 q-registers
asm volatile(
"pld [%1, #256] \n"
"vld1.u16 {d4-d7}, [%1]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmax.f32 q0, q0, q1 \n"
"vmax.f32 q2, q2, q3 \n"
"pld [%1, #256] \n"
"vld1.u16 {d12-d15}, [%1]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmax.f32 q4, q4, q5 \n"
"vmax.f32 q6, q6, q7 \n"
"pld [%2, #256] \n"
"vld1.u16 {d20-d23}, [%2]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmax.f32 q8, q8, q9 \n"
"vmax.f32 q10, q10, q11 \n"
"pld [%2, #256] \n"
"vld1.u16 {d28-d31}, [%2]! \n"
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmax.f32 q12, q12, q13 \n"
"vmax.f32 q14, q14, q15 \n"
"vmax.f32 q0, q0, q8 \n"
"vmax.f32 q1, q2, q10 \n"
"vmax.f32 q2, q4, q12 \n"
"vmax.f32 q3, q6, q14 \n"
"vshrn.u32 d0, q0, #16 \n"
"vshrn.u32 d1, q1, #16 \n"
"vshrn.u32 d2, q2, #16 \n"
"vshrn.u32 d3, q3, #16 \n"
"vst1.u16 {d0-d3}, [%0]! \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1) // %2
: "0"(outptr),
"1"(r0),
"2"(r1)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// scalar tail: one output pixel (2x2 window, 4 lanes) per iteration
for (; j < outw; j++)
{
float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
float32x4_t _r10 = vcvt_f32_bf16(vld1_u16(r1));
float32x4_t _r11 = vcvt_f32_bf16(vld1_u16(r1 + 4));
float32x4_t _max0 = vmaxq_f32(_r00, _r01);
float32x4_t _max1 = vmaxq_f32(_r10, _r11);
float32x4_t _max = vmaxq_f32(_max0, _max1);
vst1_u16(outptr, vcvt_bf16_f32(_max));
r0 += 8;
r1 += 8;
outptr += 4;
}
r0 += tailstep;
r1 += tailstep;
}
}
}
|
GB_binop__bxnor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int64)
// C=scalar+B GB (_bind1st__bxnor_int64)
// C=scalar+B' GB (_bind1st_tran__bxnor_int64)
// C=A+scalar GB (_bind2nd__bxnor_int64)
// C=A'+scalar GB (_bind2nd_tran__bxnor_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ~((aij) ^ (bij))
// Type and operator macros consumed by the templates #include'd below.
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: bitwise XNOR
#define GB_BINOP(z, x, y, i, j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped (XNOR is commutative, so no)
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT64 || GxB_NO_BXNOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: BXNOR is not one of the accumulate ops this kernel supports.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense; cij = ~(aij ^ bij).
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// BXNOR operator as the accumulator.
GrB_Info GB (_Cdense_accumB__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the BXNOR operator.
// p_bwork points to the scalar, typecast to int64_t below.
// Fix vs. original: the generated code returned GrB_SUCCESS both inside the
// scalar block and again after it, leaving the second return unreachable;
// the dead inner return is removed (matches _Cdense_accumB above).
GrB_Info GB (_Cdense_accumb__bxnor_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no column-scale (C = A*D) kernel is generated for BXNOR.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no row-scale (C = D*B) kernel is generated for BXNOR.
// Fix vs. original: the placeholder name read "GB ((node))" -- corrected to
// "GB ((none))" for consistency with the other disabled stubs in this file.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, any sparsity formats.  The three
// *_ek_slicing workspaces are declared here for the template and released
// by GB_FREE_WORK.
GrB_Info GB (_AaddB__bxnor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B, general case.
GrB_Info GB (_AemultB_01__bxnor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for BXNOR (commutative), so only the
// non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_03__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__bxnor_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bxnor (x, Bx [p]) for every entry present in B (per bitmap Bb).
GrB_Info GB (_bind1st__bxnor_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (GBB (Bb, p))
        {
            int64_t bij = Bx [p] ;
            Cx [p] = ~((x) ^ (bij)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bxnor (Ax [p], y) for every entry present in A (per bitmap Ab).
GrB_Info GB (_bind2nd__bxnor_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (GBB (Ab, p))
        {
            int64_t aij = Ax [p] ;
            Cx [p] = ~((aij) ^ (y)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// to the first argument.
GrB_Info GB (_bind1st_tran__bxnor_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (preprocessor-only; this text
// after the return is never executed but the #define still takes effect)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB (_bind2nd_tran__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp1.c |
#include <Python.h>
#include <numpy/arrayobject.h>
#include <math.h>
#include <omp.h>
// Forward declarations of our function.
static PyObject *evolve(PyObject *self, PyObject *args);
// Boilerplate: function list.
// Method table mapping the Python-visible name "evolve" to the C function.
static PyMethodDef methods[] = {
{ "evolve", evolve, METH_VARARGS, "Doc string."},
{ NULL, NULL, 0, NULL } /* Sentinel */
};
// Boilerplate: Module initialization.
// Python 2 entry point: registers the module and initializes numpy's C API.
PyMODINIT_FUNC initomp1(void) {
(void) Py_InitModule("omp1", methods);
import_array();
}
/*****************************************************************************
* compute_F *
*****************************************************************************/
// Compute the net 2D force on each of N bodies.  Layout: positions r and
// forces F are xy-interleaved (body i occupies slots 2*i and 2*i+1); m holds
// the N masses.  Ft is scratch of size threads*2*N: each thread accumulates
// the symmetric i/j pair updates into its own 2*N slice, so no atomics are
// needed; the implicit barrier at the end of the first omp-for makes the
// final reduction into F safe.
// NOTE(review): force magnitude is m[i]*m[j]/|s|^3 times the separation
// vector, i.e. gravity with G folded into the unit system -- confirm with
// the Python-side caller.  Coincident bodies (|s| == 0) divide by zero.
static inline void compute_F(npy_int64 threads,
npy_int64 N,
npy_float64 *m,
npy_float64 *r,
npy_float64 *F,
npy_float64 *Ft) {
npy_int64 id, i, j, xi, yi, xj, yj, Nid;
npy_float64 sx, sy, Fx, Fy, s3, tmp;
#pragma omp parallel private(id, i, j, xi, yi, xj, yj, Nid, sx, sy, Fx, Fy, s3, tmp)
{
id = omp_get_thread_num();
Nid = 2 * N * id; // Zero-index in thread-local array Ft.
// Zero out the thread-local force arrays.
for(i = 0; i < N; i++) {
xi = 2*(N*id + i);
yi = xi + 1;
Ft[xi] = Ft[yi] = 0;
}
// Compute forces between pairs of bodies.
#pragma omp for schedule(dynamic, 8)
for(i = 0; i < N; ++i) {
xi = 2*i;
yi = xi + 1;
F[xi] = F[yi] = 0; // also clears the shared output slot for the reduction
for(j = i + 1; j < N; ++j) {
xj = 2*j;
yj = xj + 1;
sx = r[xj] - r[xi];
sy = r[yj] - r[yi];
s3 = sqrt(sx*sx + sy*sy);
s3 *= s3 * s3; // s3 = |s|^3
tmp = m[i] * m[j] / s3;
Fx = tmp * sx;
Fy = tmp * sy;
// equal and opposite forces, accumulated thread-locally
Ft[Nid + xi] += Fx;
Ft[Nid + yi] += Fy;
Ft[Nid + xj] -= Fx;
Ft[Nid + yj] -= Fy;
}
}
// Sum the thread-local forces computed above.
// (the implicit barrier of the previous omp-for guarantees all Ft
// slices are complete before any thread starts reducing)
#pragma omp for
for(i = 0; i < N; ++i) {
xi = 2*i;
yi = xi + 1;
for(id = 0; id < threads; ++id) {
xj = 2*(N*id + i);
yj = xj + 1;
F[xi] += Ft[xj];
F[yi] += Ft[yj];
}
}
}
}
/*****************************************************************************
* evolve *
*****************************************************************************/
// evolve(threads, dt, steps, N, m, r, v, F, Ft): advance the N-body system
// `steps` explicit-Euler steps of size dt, updating r and v in place.
// Argument format "ldllO!O!O!O!O!": threads(long), dt(double), steps(long),
// N(long), then five numpy arrays (masses, positions, velocities, forces,
// per-thread force scratch).
// NOTE(review): dtype/contiguity of the arrays is not validated here -- the
// caller is assumed to pass C-contiguous float64 arrays of the right sizes.
static PyObject *evolve(PyObject *self, PyObject *args) {
// Variable declarations.
npy_int64 N, threads, steps, step, i, xi, yi;
npy_float64 dt;
PyArrayObject *py_m, *py_r, *py_v, *py_F, *py_Ft;
npy_float64 *m, *r, *v, *F, *Ft;
// Parse arguments.
if(!PyArg_ParseTuple(args, "ldllO!O!O!O!O!",
&threads,
&dt,
&steps,
&N,
&PyArray_Type, &py_m,
&PyArray_Type, &py_r,
&PyArray_Type, &py_v,
&PyArray_Type, &py_F,
&PyArray_Type, &py_Ft)) {
return NULL;
}
omp_set_num_threads(threads);
// Get underlying arrays from numpy arrays.
m = (npy_float64*)PyArray_DATA(py_m);
r = (npy_float64*)PyArray_DATA(py_r);
v = (npy_float64*)PyArray_DATA(py_v);
F = (npy_float64*)PyArray_DATA(py_F);
Ft = (npy_float64*)PyArray_DATA(py_Ft);
// Evolve the world.
for(step = 0; step < steps; ++step) {
compute_F(threads, N, m, r, F, Ft);
// kick-then-drift Euler update, parallel over bodies
#pragma omp parallel for private(i, xi, yi)
for(i = 0; i < N; ++i) {
xi = 2 * i;
yi = xi + 1;
v[xi] += F[xi] * dt / m[i];
v[yi] += F[yi] * dt / m[i];
r[xi] += v[xi] * dt;
r[yi] += v[yi] * dt;
}
}
Py_RETURN_NONE;
}
|
GB_unaryop__identity_bool_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_int8
// op(A') function: GB_tran__identity_bool_int8
// C type: bool
// A type: int8_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// Type and operator macros consumed by the loops below.
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator (identity: z = x)
#define GB_OP(z, x) \
z = x ;
// casting (int8_t -> bool)
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) Ax [p]: apply the identity operator with an
// int8_t -> bool typecast, elementwise over anz entries.
GrB_Info GB_unop__identity_bool_int8
(
    bool *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // expanded from GB_CAST_OP (p, p) for readability
        int8_t aij = Ax [p] ;
        Cx [p] = (bool) aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecasting int8_t -> bool.
GrB_Info GB_tran__identity_bool_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
UsefulFunctions.c | // -------------------------------------------------------------------------------------
// Taken from COSMOLOGY.H
// -------------------------------------------------------------------------------------
#define Ho (double) (cosmo_params_ufunc->hlittle*3.2407e-18) // s^-1 at z=0
#define RHOcrit (double) ( (3.0*Ho*Ho / (8.0*PI*G)) * (CMperMPC*CMperMPC*CMperMPC)/Msun) // Msun Mpc^-3 ---- at z=0
#define RHOcrit_cgs (double) (3.0*Ho*Ho / (8.0*PI*G)) // g pcm^-3 ---- at z=0
#define No (double) (RHOcrit_cgs*cosmo_params_ufunc->OMb*(1-global_params.Y_He)/m_p) // current hydrogen number density estimate (#/cm^3) ~1.92e-7
#define He_No (double) (RHOcrit_cgs*cosmo_params_ufunc->OMb*global_params.Y_He/(4.0*m_p)) // current helium number density estimate
#define N_b0 (double) (No+He_No) // present-day baryon num density, H + He
#define f_H (double) (No/(No+He_No)) // hydrogen number fraction
#define f_He (double) (He_No/(No+He_No)) // helium number fraction
struct CosmoParams *cosmo_params_ufunc;
struct UserParams *user_params_ufunc;
/* Publish the user/cosmology parameter structs to this translation unit's
   file-scope globals so the helper functions below can reach them. */
void Broadcast_struct_global_UF(struct UserParams *user_params, struct CosmoParams *cosmo_params){
    user_params_ufunc  = user_params;
    cosmo_params_ufunc = cosmo_params;
}
/* Kinetic temperature (K) of gas that was fully ionized at redshift z_re and
   is observed at redshift z with density contrast delta.  Follows the
   evolving-ionized-box prescription (eq. 6 of McQuinn 2015, per the inline
   comment), ignoring the dependency on the density at ionization. */
float ComputeFullyIoinizedTemperature(float z_re, float z, float delta){
    // z_re: the redshift of reionization
    // z: the current redshift
    // delta: the density contrast
    float result, delta_re;
    // just be fully ionized: no evolution factor if z is (numerically) z_re
    if (fabs(z - z_re) < 1e-4)
        result = 1;
    else{
        // linearly extrapolate to get density at reionization
        delta_re = delta * (1. + z ) / (1. + z_re);
        // clamp to the empty-cell floor so (1+delta) stays positive
        if (delta_re<=-1) delta_re=-1. + global_params.MIN_DENSITY_LOW_LIMIT;
        // evolving ionized box eq. 6 of McQuinn 2015, ignored the dependency of density at ionization
        if (delta<=-1) delta=-1. + global_params.MIN_DENSITY_LOW_LIMIT;
        result = pow((1. + delta) / (1. + delta_re), 1.1333);
        result *= pow((1. + z) / (1. + z_re), 3.4);
        result *= expf(pow((1. + z)/7.1, 2.5) - pow((1. + z_re)/7.1, 2.5));
    }
    // scale by the reionization temperature, raised to 1.7
    result *= pow(global_params.T_RE, 1.7);
    // 1e4 before helium reionization; double it after
    result += pow(1e4 * ((1. + z)/4.), 1.7) * ( 1 + delta);
    // 0.5882 ~= 1/1.7: undo the 1.7 exponent applied to both terms above
    result = pow(result, 0.5882);
    //LOG_DEBUG("z_re=%.4f, z=%.4f, delta=%e, Tk=%.f", z_re, z, delta, result);
    return result;
}
/* Blend the neutral-gas temperature T_HI with the reionization temperature
   T_RE, weighted by the residual neutral fraction res_xH (clamped to [0,1]). */
float ComputePartiallyIoinizedTemperature(float T_HI, float res_xH){
    if (res_xH >= 1)  return T_HI;                  // fully neutral cell
    if (res_xH <= 0.) return global_params.T_RE;    // fully ionized cell
    return T_HI * res_xH + global_params.T_RE * (1. - res_xH);
}
/**
 * Filter a k-space box in place with a window of comoving radius R (Mpc).
 *
 * box:         half-complex FFT box (z runs only up to the midpoint)
 * RES:         0 -> high-res grid (DIM/MIDDLE), 1 -> low-res grid (HII_DIM/HII_MIDDLE)
 * filter_type: 0 real-space top-hat, 1 k-space top-hat, 2 Gaussian;
 *              any other value leaves the box unfiltered (warning logged once)
 */
void filter_box(fftwf_complex *box, int RES, int filter_type, float R){
    int n_x, n_z, n_y, dimension, midpoint;
    float k_x, k_y, k_z, k_mag, kR;

    switch(RES) {
        case 0:
            dimension = user_params_ufunc->DIM;
            midpoint = MIDDLE;
            break;
        case 1:
            dimension = user_params_ufunc->HII_DIM;
            midpoint = HII_MIDDLE;
            break;
        default:
            // BUG FIX: previously an invalid RES fell through with dimension
            // and midpoint uninitialized (undefined behavior). Fail loudly.
            LOG_ERROR("No such resolution RES = %i. Box is unfiltered.", RES);
            return;
    }

    // loop through k-box
#pragma omp parallel shared(box) private(n_x,n_y,n_z,k_x,k_y,k_z,k_mag,kR) num_threads(user_params_ufunc->N_THREADS)
    {
#pragma omp for
        for (n_x=0; n_x<dimension; n_x++){
            if (n_x>midpoint) {k_x =(n_x-dimension) * DELTA_K;}  // negative-frequency half
            else {k_x = n_x * DELTA_K;}

            for (n_y=0; n_y<dimension; n_y++){
                if (n_y>midpoint) {k_y =(n_y-dimension) * DELTA_K;}
                else {k_y = n_y * DELTA_K;}

                for (n_z=0; n_z<=midpoint; n_z++){
                    k_z = n_z * DELTA_K;

                    if (filter_type == 0){ // real space top-hat
                        k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z);
                        kR = k_mag*R;
                        // W(kR) -> 1 as kR -> 0, so leave tiny kR untouched
                        if (kR > 1e-4){
                            if(RES==1) { box[HII_C_INDEX(n_x, n_y, n_z)] *= 3.0*pow(kR, -3) * (sin(kR) - cos(kR)*kR); }
                            if(RES==0) { box[C_INDEX(n_x, n_y, n_z)] *= 3.0*pow(kR, -3) * (sin(kR) - cos(kR)*kR); }
                        }
                    }
                    else if (filter_type == 1){ // k-space top hat
                        k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z);
                        kR = k_mag*R;
                        kR *= 0.413566994; // equates integrated volume to the real space top-hat (9pi/2)^(-1/3)
                        if (kR > 1){
                            if(RES==1) { box[HII_C_INDEX(n_x, n_y, n_z)] = 0; }
                            if(RES==0) { box[C_INDEX(n_x, n_y, n_z)] = 0; }
                        }
                    }
                    else if (filter_type == 2){ // gaussian
                        // this is (kR)^2 with the 0.643 volume-matching factor
                        // folded in, avoiding the slower sqrt call
                        kR = 0.643*0.643*( k_x*k_x + k_y*k_y + k_z*k_z )*R*R;
                        if(RES==1) { box[HII_C_INDEX(n_x, n_y, n_z)] *= pow(E, -kR/2.0); }
                        if(RES==0) { box[C_INDEX(n_x, n_y, n_z)] *= pow(E, -kR/2.0); }
                    }
                    else{
                        // warn once, on the first cell only
                        if ( (n_x==0) && (n_y==0) && (n_z==0) )
                            LOG_WARNING("Filter type %i is undefined. Box is unfiltered.", filter_type);
                    }
                }
            }
        }
    } // end looping through k box

    return;
}
double MtoR(double M);
double RtoM(double R);
double TtoM(double z, double T, double mu);
double dicke(double z);
double dtdz(float z);
double ddickedt(double z);
double omega_mz(float z);
double Deltac_nonlinear(float z);
double drdz(float z); /* comoving distance, (1+z)*C*dtdz(in cm) per unit z */
double alpha_A(double T);
/* returns the case B hydrogen recombination coefficient (Spitzer 1978) in cm^3 s^-1*/
double alpha_B(double T);
double HeI_ion_crosssec(double nu);
double HeII_ion_crosssec(double nu);
double HI_ion_crosssec(double nu);
/* R in Mpc, M in Msun.  Invert the mass<->radius relation implied by the
   filter chosen in ../Parameter_files/COSMOLOGY.H; throws ValueError for an
   unknown filter. */
double MtoR(double M){
    switch (global_params.FILTER) {
        case 0: // top hat: M = (4/3) PI <rho> R^3
            return pow(3*M/(4*PI*cosmo_params_ufunc->OMm*RHOcrit), 1.0/3.0);
        case 1: // gaussian: M = (2PI)^1.5 <rho> R^3
            return pow( M/(pow(2*PI, 1.5) * cosmo_params_ufunc->OMm * RHOcrit), 1.0/3.0 );
        default: // filter not defined
            LOG_ERROR("No such filter = %i. Results are bogus.", global_params.FILTER);
            Throw ValueError;
    }
}
/* R in Mpc, M in Msun.  Mass enclosed by a filter of radius R, per the filter
   chosen in ../Parameter_files/COSMOLOGY.H; throws ValueError for an unknown
   filter. */
double RtoM(double R){
    switch (global_params.FILTER) {
        case 0: // top hat: M = (4/3) PI <rho> R^3
            return (4.0/3.0)*PI*pow(R,3)*(cosmo_params_ufunc->OMm*RHOcrit);
        case 1: // gaussian: M = (2PI)^1.5 <rho> R^3
            return pow(2*PI, 1.5) * cosmo_params_ufunc->OMm*RHOcrit * pow(R, 3);
        default: // filter not defined
            LOG_ERROR("No such filter = %i. Results are bogus.", global_params.FILTER);
            Throw ValueError;
    }
}
/*
 Virial mass (Msun) corresponding to virial temperature T.
 T in K, M in Msun, mu is mean molecular weight.
 From Barkana & Loeb 2001.
 SUPRESS = 0 for no radiation field supression;
 SUPRESS = 1 for supression (step function at z=z_ss, at v=v_zz)
 (the SUPRESS variants below are kept commented out and are not compiled)
*/
double TtoM(double z, double T, double mu){
    return 7030.97 / (cosmo_params_ufunc->hlittle) * sqrt( omega_mz(z) / (cosmo_params_ufunc->OMm*Deltac_nonlinear(z))) *
           pow( T/(mu * (1+z)), 1.5 );
    /*  if (!SUPRESS || (z >= z_re) ) // pre-reionization or don't worry about supression
        return 7030.97 / hlittle * sqrt( omega_mz(z) / (OMm*Deltac_nonlinear(z)) ) *
        pow( T/(mu * (1+z)), 1.5 );

        if (z >= z_ss) // self-shielding dominates, use T = 1e4 K
        return 7030.97 / hlittle * sqrt( omega_mz(z) / (OMm*Deltac_nonlinear(z)) ) *
        pow( 1.0e4 /(mu * (1+z)), 1.5 );

        // optically thin
        return 7030.97 / hlittle * sqrt( omega_mz(z) / (OMm*Deltac_nonlinear(z)) ) *
        pow( VcirtoT(v_ss, mu) /(mu * (1+z)), 1.5 );
    */
}
/* Physical (non-linear) overdensity at virialization, relative to the
   critical density (i.e. the answer is rho / rho_crit).  Equals 178 in an
   Einstein-de-Sitter model.  Fitting formula from Bryan & Norman 1998:
   18 pi^2 + 82 x - 39 x^2, with x = Omega_m(z) - 1. */
double Deltac_nonlinear(float z){
    double x = omega_mz(z) - 1.0;
    return 18*PI*PI + 82*x - 39*x*x;
}
/* Omega matter at redshift z: matter density over the total (matter + lambda
   + radiation + curvature) density. */
double omega_mz(float z){
    double matter = cosmo_params_ufunc->OMm*pow(1+z,3);
    return matter / (matter + cosmo_params_ufunc->OMl + global_params.OMr*pow(1+z,4) + global_params.OMk*pow(1+z, 2));
}
/*
 FUNCTION dicke(z)
 Computes the Dicke growth function at redshift z, i.e. the z-dependent part
 of sigma.  Normalized to dicke(z=0)=1.
 References: Peebles, "Large-Scale...", pg.53 (eq. 11.16); includes omega<=1.
 Nonzero Lambda case from Liddle et al, astro-ph/9512102, eqs. 6-8,
 and quintessence case from Wang et al, astro-ph/9804015 (not implemented).
 Throws ValueError if no supported cosmology branch matches.
*/
double dicke(double z){
    double omegaM_z, dick_z, dick_0, x, x_0;
    double tiny = 1e-4;  // tolerance for matching a cosmology branch

    if (fabs(cosmo_params_ufunc->OMm-1.0) < tiny){ //OMm = 1 (Einstein de-Sitter)
        return 1.0/(1.0+z);
    }
    else if ( (cosmo_params_ufunc->OMl > (-tiny)) && (fabs(cosmo_params_ufunc->OMl+cosmo_params_ufunc->OMm+global_params.OMr-1.0) < 0.01) && (fabs(global_params.wl+1.0) < tiny) ){
        //this is a flat, cosmological CONSTANT universe, with only lambda, matter and radiation
        //it is taken from liddle et al.
        omegaM_z = cosmo_params_ufunc->OMm*pow(1+z,3) / ( cosmo_params_ufunc->OMl + cosmo_params_ufunc->OMm*pow(1+z,3) + global_params.OMr*pow(1+z,4) );
        dick_z = 2.5*omegaM_z / ( 1.0/70.0 + omegaM_z*(209-omegaM_z)/140.0 + pow(omegaM_z, 4.0/7.0) );
        dick_0 = 2.5*cosmo_params_ufunc->OMm / ( 1.0/70.0 + cosmo_params_ufunc->OMm*(209-cosmo_params_ufunc->OMm)/140.0 + pow(cosmo_params_ufunc->OMm, 4.0/7.0) );
        return dick_z / (dick_0 * (1.0+z));
    }
    else if ( (global_params.OMtot < (1+tiny)) && (fabs(cosmo_params_ufunc->OMl) < tiny) ){ //open, zero lambda case (peebles, pg. 53)
        x_0 = 1.0/(cosmo_params_ufunc->OMm+0.0) - 1.0;
        dick_0 = 1 + 3.0/x_0 + 3*log(sqrt(1+x_0)-sqrt(x_0))*sqrt(1+x_0)/pow(x_0,1.5);
        x = fabs(1.0/(cosmo_params_ufunc->OMm+0.0) - 1.0) / (1+z);
        dick_z = 1 + 3.0/x + 3*log(sqrt(1+x)-sqrt(x))*sqrt(1+x)/pow(x,1.5);
        return dick_z/dick_0;
    }
    else if ( (cosmo_params_ufunc->OMl > (-tiny)) && (fabs(global_params.OMtot-1.0) < tiny) && (fabs(global_params.wl+1) > tiny) ){
        // quintessence (Wang et al) branch is not implemented
        LOG_WARNING("IN WANG.");
        Throw ValueError;
    }

    LOG_ERROR("No growth function!");
    Throw ValueError;
}
/* function DTDZ returns the value of dt/dz at the redshift parameter z
   (seconds per unit redshift; negative, since cosmic time decreases with
   increasing z -- dxdz carries the -3/2 factor). */
double dtdz(float z){
    double x, dxdz, const1, denom, numer;

    // x = sqrt(OMl/OMm) * (1+z)^(-3/2) and its z-derivative
    x = sqrt( cosmo_params_ufunc->OMl/cosmo_params_ufunc->OMm ) * pow(1+z, -3.0/2.0);
    dxdz = sqrt( cosmo_params_ufunc->OMl/cosmo_params_ufunc->OMm ) * pow(1+z, -5.0/2.0) * (-3.0/2.0);
    const1 = 2 * sqrt( 1 + cosmo_params_ufunc->OMm/cosmo_params_ufunc->OMl ) / (3.0 * Ho) ;

    numer = dxdz * (1 + x*pow( pow(x,2) + 1, -0.5));
    denom = x + sqrt(pow(x,2) + 1);
    return (const1 * numer / denom);
}
/* Time derivative of the growth function at z: d(dicke)/dt.
   Evaluated numerically by forward-differencing dicke(z) with step dz and
   converting from d/dz to d/dt via dtdz(z).
   NOTE: the original analytic Einstein-de-Sitter / flat-Lambda branches sat
   behind an unconditional return and were unreachable dead code; they have
   been removed along with their (then-unused) locals. */
double ddickedt(double z){
    float dz = 1e-10;  // kept as float to preserve the historical step size
    return (dicke(z+dz)-dicke(z))/dz/dtdz(z); // lazy non-analytic form getting
}
/* returns the hubble "constant" (in 1/sec) at z:
   H(z) = Ho * sqrt(OMm(1+z)^3 + OMr(1+z)^4 + OMl) */
double hubble(float z){
    return Ho*sqrt(cosmo_params_ufunc->OMm*pow(1+z,3) + global_params.OMr*pow(1+z,4) + cosmo_params_ufunc->OMl);
}
/* returns hubble time (in sec) at redshift z, t_h = 1/H(z) */
double t_hubble(float z){
    return 1.0/hubble(z);
}
/* comoving distance (in cm) per unit redshift: dr/dz = (1+z) * C * dt/dz */
double drdz(float z){
    return (1.0+z)*C*dtdz(z);
}
/* returns the case A hydrogen recombination coefficient (Abel et al. 1997)
   in cm^3 s^-1, via a 9th-order polynomial fit in log temperature */
double alpha_A(double T){
    double logT, ans;
    // T normalized by 1.1604505e4 K (~1 eV) before taking the log
    logT = log(T/(double)1.1604505e4);
    ans = pow(E, -28.6130338 - 0.72411256*logT - 2.02604473e-2*pow(logT, 2)
              - 2.38086188e-3*pow(logT, 3) - 3.21260521e-4*pow(logT, 4)
              - 1.42150291e-5*pow(logT, 5) + 4.98910892e-6*pow(logT, 6)
              + 5.75561414e-7*pow(logT, 7) - 1.85676704e-8*pow(logT, 8)
              - 3.07113524e-9 * pow(logT, 9));
    return ans;
}
/* returns the case B hydrogen recombination coefficient (Spitzer 1978)
   in cm^3 s^-1: power-law scaling of the 10^4 K value with T^-0.75 */
double alpha_B(double T){
    return alphaB_10k * pow (T/1.0e4, -0.75);
}
/*
 Function NEUTRAL_FRACTION returns the hydrogen neutral fraction, chi, given:
 density  : hydrogen density (pcm^-3)
 T4       : gas temperature (10^4 K)
 gamma    : ionization rate (1e-12 s^-1)
 usecaseB : nonzero to use the case-B recombination coefficient, else case A
*/
double neutral_fraction(double density, double T4, double gamma, int usecaseB){
    // corr_He accounts for the electrons contributed by singly ionized helium
    double chi, b, alpha, corr_He = 1.0/(4.0/global_params.Y_He - 3);

    if (usecaseB)
        alpha = alpha_B(T4*1e4);
    else
        alpha = alpha_A(T4*1e4);

    gamma *= 1e-12;  // convert to s^-1

    // approximation chi << 1 (ionization equilibrium, linearized)
    chi = (1+corr_He)*density * alpha / gamma;
    if (chi < TINY){ return 0;}
    if (chi < 1e-5)
        return chi;
    // this code, while mathematically accurate, is numerically buggy for very small x_HI, so i will use valid approximation x_HI <<1 above when x_HI < 1e-5, and this otherwise... the two converge seemlessly

    //get solutions of quadratic of chi (neutral fraction)
    b = -2 - gamma / (density*(1+corr_He)*alpha);
    chi = ( -b - sqrt(b*b - 4) ) / 2.0; //correct root
    return chi;
}
/* function HeI_ion_crosssec returns the HeI ionization cross section (cm^2)
   at parameter frequency nu; zero below the HeI ionization threshold.
   (fit taken from Verner et al 1996) */
double HeI_ion_crosssec(double nu){
    double x,y,Fy;  // NOTE: Fy is declared but unused

    if (nu < HeI_NUIONIZATION)
        return 0;

    x = nu/NU_over_EV/13.61 - 0.4434;
    y = sqrt(x*x + pow(2.136, 2));
    return 9.492e-16*((x-1)*(x-1) + 2.039*2.039) *
        pow(y, (0.5 * 3.188 - 5.5))
        * pow(1.0 + sqrt(y/1.469), -3.188);
}
/* function HeII_ion_crosssec returns the HeII ionization cross section (cm^2)
   at parameter frequency nu; zero below the HeII ionization threshold.
   (taken from Osterbrock, pg. 14) */
double HeII_ion_crosssec(double nu){
    double epsilon, Z = 2;  // hydrogenic formula with nuclear charge Z=2

    if (nu < HeII_NUIONIZATION)
        return 0;

    // nudge off the exact threshold so epsilon != 0 (avoids division by zero)
    if (nu == HeII_NUIONIZATION)
        nu+=TINY;

    epsilon = sqrt( nu/HeII_NUIONIZATION - 1);
    return (6.3e-18)/Z/Z * pow(HeII_NUIONIZATION/nu, 4)
        * pow(E, 4-(4*atan(epsilon)/epsilon)) / (1-pow(E, -2*PI/epsilon));
}
/* function HI_ion_crosssec returns the HI ionization cross section (cm^2)
   at parameter frequency nu; zero below the HI ionization threshold.
   (taken from Osterbrock, pg. 14) */
double HI_ion_crosssec(double nu){
    double epsilon, Z = 1;  // hydrogenic formula with nuclear charge Z=1

    if (nu < NUIONIZATION)
        return 0;

    // nudge off the exact threshold so epsilon != 0 (avoids division by zero)
    if (nu == NUIONIZATION)
        nu+=TINY;

    epsilon = sqrt( nu/NUIONIZATION - 1);
    return (6.3e-18)/Z/Z * pow(NUIONIZATION/nu, 4)
        * pow(E, 4-(4*atan(epsilon)/epsilon)) / (1-pow(E, -2*PI/epsilon));
}
/* Return the Thomson scattering optical depth from zstart to zend through a
   fully ionized IGM.  The hydrogen reionization history is given by the zarry
   and xHarry parameters, in increasing redshift order of length len. */
typedef struct{
    float *z, *xH;  // tabulated redshifts and neutral fractions
    int len;        // table length; 0 means "fully ionized everywhere"
} tau_e_params;

/* Integrand for tau_e: ionized fraction times (1+z)^2 * dr/dz.
   params must point to a tau_e_params struct. */
double dtau_e_dz(double z, void *params){
    float xH, xi;
    int i=1;
    tau_e_params p = *(tau_e_params *)params;

    if ((p.len == 0) || !(p.z)) {
        // no history supplied: assume fully ionized
        return (1+z)*(1+z)*drdz(z);
    }
    else{
        // find where we are in the redshift array
        if (p.z[0]>z) // ionization fraction is 1 prior to start of array
            return (1+z)*(1+z)*drdz(z);
        while ( (i < p.len) && (p.z[i] < z) ) {i++;}
        if (i == p.len)  // beyond the table: treat as neutral
            return 0;

        // linearly interpolate in redshift
        xH = p.xH[i-1] + (p.xH[i] - p.xH[i-1])/(p.z[i] - p.z[i-1]) * (z - p.z[i-1]);
        xi = 1.0-xH;  // ionized fraction, clamped to [0,1] below
        if (xi<0){
            LOG_WARNING("in taue: funny business xi=%e, changing to 0.", xi);
            xi=0;
        }
        if (xi>1){
            LOG_WARNING("in taue: funny business xi=%e, changing to 1", xi);
            xi=1;
        }

        return xi*(1+z)*(1+z)*drdz(z);
    }
}
/**
 * Thomson-scattering optical depth of the IGM between zstart and zend
 * (requires zstart < zend).  The hydrogen ionization history is supplied via
 * (zarry, xHarry) of length len in increasing-redshift order; len=0 or NULL
 * arrays mean a fully ionized IGM.  The integral is split at
 * global_params.Zreion_HeII: below it the helium contribution is doubled
 * (prehelium term weighted by N_b0+He_No instead of N_b0).
 * Throws ValueError on bad arguments; raises GSL_ERROR if integration fails.
 */
double tau_e(float zstart, float zend, float *zarry, float *xHarry, int len){
    double prehelium, posthelium, error;
    gsl_function F;
    double rel_tol = 1e-3; //<- relative tolerance
    tau_e_params p;
    int status;

    if (zstart >= zend){
        LOG_ERROR("in tau_e: First parameter must be smaller than the second.\n");
        Throw ValueError;
    }

    // allocate the workspace only after argument validation, so the early
    // Throw above cannot leak it
    gsl_integration_workspace * w
        = gsl_integration_workspace_alloc (1000);

    F.function = &dtau_e_dz;
    p.z = zarry;
    p.xH = xHarry;
    p.len = len;
    F.params = &p;

    // never integrate past the last tabulated redshift
    if ((len > 0) && zarry)
        zend = zarry[len-1] - FRACT_FLOAT_ERR;

    gsl_set_error_handler_off();

    if (zend > global_params.Zreion_HeII){
        if (zstart < global_params.Zreion_HeII){
            // split the integral at HeII reionization
            status = gsl_integration_qag (&F, global_params.Zreion_HeII, zstart, 0, rel_tol,
                                          1000, GSL_INTEG_GAUSS61, w, &prehelium, &error);

            if(status!=0) {
                LOG_ERROR("gsl integration error occured!");
                LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",global_params.Zreion_HeII,zstart,rel_tol,prehelium,error);
                LOG_ERROR("data: zstart=%e zend=%e",zstart,zend);
                gsl_integration_workspace_free (w);  // BUG FIX: was leaked on this path
                GSL_ERROR(status);
            }

            status = gsl_integration_qag (&F, zend, global_params.Zreion_HeII, 0, rel_tol,
                                          1000, GSL_INTEG_GAUSS61, w, &posthelium, &error);

            if(status!=0) {
                LOG_ERROR("gsl integration error occured!");
                LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",zend,global_params.Zreion_HeII,rel_tol,posthelium,error);
                LOG_ERROR("data: zstart=%e zend=%e",zstart,zend);
                gsl_integration_workspace_free (w);  // BUG FIX: was leaked on this path
                GSL_ERROR(status);
            }
        }
        else{
            prehelium = 0;
            status = gsl_integration_qag (&F, zend, zstart, 0, rel_tol,
                                          1000, GSL_INTEG_GAUSS61, w, &posthelium, &error);

            if(status!=0) {
                LOG_ERROR("gsl integration error occured!");
                LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",zend,zstart,rel_tol,posthelium,error);
                gsl_integration_workspace_free (w);  // BUG FIX: was leaked on this path
                GSL_ERROR(status);
            }
        }
    }
    else{
        posthelium = 0;
        status = gsl_integration_qag (&F, zend, zstart, 0, rel_tol,
                                      1000, GSL_INTEG_GAUSS61, w, &prehelium, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occured!");
            LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",zend,zstart,rel_tol,prehelium,error);
            gsl_integration_workspace_free (w);  // BUG FIX: was leaked on this path
            GSL_ERROR(status);
        }
    }
    gsl_integration_workspace_free (w);

    // prehelium picks up the extra HeII electron: N_b0 + He_No = No + 2*He_No
    return SIGMAT * ( (N_b0+He_No)*prehelium + N_b0*posthelium );
}
/* Broadcast the parameter structs to this module, then integrate the Thomson
   optical depth from z=0 out to the highest supplied redshift using the given
   global neutral-fraction history. */
float ComputeTau(struct UserParams *user_params, struct CosmoParams *cosmo_params, int NPoints, float *redshifts, float *global_xHI) {
    Broadcast_struct_global_UF(user_params, cosmo_params);
    return (float) tau_e(0, redshifts[NPoints-1], redshifts, global_xHI, NPoints);
}
/* Log the contents of a UserParams struct at INFO level. */
void writeUserParams(struct UserParams *p){
    LOG_INFO("UserParams: [HII_DIM=%d, DIM=%d, BOX_LEN=%f, HMF=%d, POWER_SPECTRUM=%d, USE_RELATIVE_VELOCITIES=%d, N_THREADS=%d, PERTURB_ON_HIGH_RES=%d, NO_RNG=%d, USE_FFTW_WISDOM=%d, USE_INTERPOLATION_TABLES=%d, FAST_FCOLL_TABLES=%d]",
             p->HII_DIM, p->DIM, p->BOX_LEN, p->HMF, p->POWER_SPECTRUM, p->USE_RELATIVE_VELOCITIES, p->N_THREADS, p->PERTURB_ON_HIGH_RES, p->NO_RNG, p->USE_FFTW_WISDOM, p->USE_INTERPOLATION_TABLES, p->FAST_FCOLL_TABLES);
}
/* Log the contents of a CosmoParams struct at INFO level. */
void writeCosmoParams(struct CosmoParams *p){
    LOG_INFO("CosmoParams: [SIGMA_8=%f, hlittle=%f, OMm=%f, OMl=%f, OMb=%f, POWER_INDEX=%f]",
             p->SIGMA_8, p->hlittle, p->OMm, p->OMl, p->OMb, p->POWER_INDEX);
}
/* Log the contents of an AstroParams struct at INFO level; the field set
   printed depends on whether mass-dependent zeta is enabled in FlagOptions. */
void writeAstroParams(struct FlagOptions *fo, struct AstroParams *p){
    if(fo->USE_MASS_DEPENDENT_ZETA) {
        LOG_INFO("AstroParams: [HII_EFF_FACTOR=%f, ALPHA_STAR=%f, ALPHA_STAR_MINI=%f, F_ESC10=%f (F_ESC7_MINI=%f), ALPHA_ESC=%f, M_TURN=%f, R_BUBBLE_MAX=%f, L_X=%e (L_X_MINI=%e), NU_X_THRESH=%f, X_RAY_SPEC_INDEX=%f, F_STAR10=%f (F_STAR7_MINI=%f), t_STAR=%f, N_RSD_STEPS=%f]",
                 p->HII_EFF_FACTOR, p->ALPHA_STAR, p->ALPHA_STAR_MINI, p->F_ESC10,p->F_ESC7_MINI, p->ALPHA_ESC, p->M_TURN,
                 p->R_BUBBLE_MAX, p->L_X, p->L_X_MINI, p->NU_X_THRESH, p->X_RAY_SPEC_INDEX, p->F_STAR10, p->F_STAR7_MINI, p->t_STAR, p->N_RSD_STEPS);
    }
    else {
        LOG_INFO("AstroParams: [HII_EFF_FACTOR=%f, ION_Tvir_MIN=%f, X_RAY_Tvir_MIN=%f, R_BUBBLE_MAX=%f, L_X=%e, NU_X_THRESH=%f, X_RAY_SPEC_INDEX=%f, F_STAR10=%f, t_STAR=%f, N_RSD_STEPS=%f]",
                 p->HII_EFF_FACTOR, p->ION_Tvir_MIN, p->X_RAY_Tvir_MIN,
                 p->R_BUBBLE_MAX, p->L_X, p->NU_X_THRESH, p->X_RAY_SPEC_INDEX, p->F_STAR10, p->t_STAR, p->N_RSD_STEPS);
    }
}
/* Log the contents of a FlagOptions struct at INFO level. */
void writeFlagOptions(struct FlagOptions *p){
    LOG_INFO("FlagOptions: [USE_HALO_FIELD=%d, USE_MINI_HALOS=%d, USE_MASS_DEPENDENT_ZETA=%d, SUBCELL_RSD=%d, INHOMO_RECO=%d, USE_TS_FLUCT=%d, M_MIN_in_Mass=%d, PHOTON_CONS=%d]",
             p->USE_HALO_FIELD, p->USE_MINI_HALOS, p->USE_MASS_DEPENDENT_ZETA, p->SUBCELL_RSD, p->INHOMO_RECO, p->USE_TS_FLUCT, p->M_MIN_in_Mass, p->PHOTON_CONS);
}
/**
 * Print "<pid>\tname:\n" (or just "name:\n" when print_pid is 0) and return
 * a heap-allocated prefix string ("<pid>\t" or "") for the caller to reuse
 * on subsequent lines.  Caller owns the returned buffer (free()).
 * Returns NULL only if allocation fails (the header is still printed).
 */
char *print_output_header(int print_pid, const char *name){
    /* BUG FIX: 24 bytes fits "<" + up to 20 digits + ">" + tab + NUL; the old
       12-byte buffer could overflow via sprintf for large pid_max values. */
    char *pid = malloc(24*sizeof(char));
    if (pid == NULL) {
        printf("%s:\n", name);
        return NULL;
    }

    if(print_pid){
        snprintf(pid, 24, "<%d>\t", getpid());  // bounded, always NUL-terminated
    }else{
        pid[0] = '\0';  // was sprintf(pid, "") — empty format string
    }

    printf("%s%s:\n", pid, name);
    return (pid);
}
/**
 * Print the 8 corner values of a cubic float box of side `size`
 * (indexed as box[k + size*(j + size*i)]) on one line.
 */
void print_corners_real(float *x, int size){
    int s = size-1;
    // BUG FIX: for size<=1, s was 0 and the i+=s loops never advanced
    // (infinite loop); step by at least 1 so size==1 prints the single cell.
    if (s < 1) s = 1;
    int i,j,k;
    for(i=0;i<size;i=i+s){
        for(j=0;j<size;j=j+s){
            for(k=0;k<size;k=k+s){
                printf("%f, ", x[k + size*(j + size*i)]);
            }
        }
    }
    printf("\n");
}
/**
 * Log an 8-corner sample plus sum/mean/min/max of a cubic float box of side
 * `size`, prefixed by `indent` — only when LOG_LEVEL >= SUPER_DEBUG_LEVEL.
 */
void debugSummarizeBox(float *box, int size, char *indent){
    if(LOG_LEVEL >= SUPER_DEBUG_LEVEL){
        float corners[8] = {0};  // zero-filled so degenerate sizes log defined values
        int i,j,k, counter;
        int s = size-1;
        // BUG FIX: for size<=1, s was 0 and the i+=s loops never advanced
        // (infinite loop); step by at least 1 instead.
        if (s < 1) s = 1;
        counter = 0;
        for(i=0;i<size;i=i+s){
            for(j=0;j<size;j=j+s){
                for(k=0;k<size;k=k+s){
                    corners[counter] = box[k + size*(j + size*i)];
                    counter++;
                }
            }
        }

        LOG_SUPER_DEBUG("%sCorners: %f %f %f %f %f %f %f %f",
                        indent,
                        corners[0], corners[1], corners[2], corners[3],
                        corners[4], corners[5], corners[6], corners[7]
                        );

        float sum, mean, mn, mx;
        sum=0;
        mn=box[0];
        mx=box[0];

        for (i=0; i<size*size*size; i++){
            sum+=box[i];
            mn=fminf(mn, box[i]);
            mx = fmaxf(mx, box[i]);
        }
        mean=sum/(size*size*size);

        LOG_SUPER_DEBUG("%sSum/Mean/Min/Max: %f, %f, %f, %f", indent, sum, mean, mn, mx);
    }
}
/**
 * Double-precision twin of debugSummarizeBox: log an 8-corner sample plus
 * sum/mean/min/max of a cubic double box of side `size`, prefixed by
 * `indent` — only when LOG_LEVEL >= SUPER_DEBUG_LEVEL.
 */
void debugSummarizeBoxDouble(double *box, int size, char *indent){
    if(LOG_LEVEL >= SUPER_DEBUG_LEVEL){
        double corners[8] = {0};  // zero-filled so degenerate sizes log defined values
        int i,j,k, counter;
        int s = size-1;
        // BUG FIX: for size<=1, s was 0 and the i+=s loops never advanced
        // (infinite loop); step by at least 1 instead.
        if (s < 1) s = 1;
        counter = 0;
        for(i=0;i<size;i=i+s){
            for(j=0;j<size;j=j+s){
                for(k=0;k<size;k=k+s){
                    corners[counter] = box[k + size*(j + size*i)];
                    counter++;
                }
            }
        }

        LOG_SUPER_DEBUG("%sCorners: %lf %lf %lf %lf %lf %lf %lf %lf",
                        indent,
                        corners[0], corners[1], corners[2], corners[3],
                        corners[4], corners[5], corners[6], corners[7]
                        );

        double sum, mean, mn, mx;
        sum=0;
        mn=box[0];
        mx=box[0];

        for (i=0; i<size*size*size; i++){
            sum+=box[i];
            mn=fmin(mn, box[i]);
            mx = fmax(mx, box[i]);
        }
        mean=sum/(size*size*size);

        LOG_SUPER_DEBUG("%sSum/Mean/Min/Max: %lf, %lf, %lf, %lf", indent, sum, mean, mn, mx);
    }
}
/* Log SUPER_DEBUG summaries of the main InitialConditions boxes: low-res
   density and velocities (HII_DIM) and the high-res density (DIM). */
void debugSummarizeIC(struct InitialConditions *x, int HII_DIM, int DIM){
    LOG_SUPER_DEBUG("Summary of InitialConditions:");
    LOG_SUPER_DEBUG("  lowres_density: ");
    debugSummarizeBox(x->lowres_density, HII_DIM, "    ");
    LOG_SUPER_DEBUG("  hires_density: ");
    debugSummarizeBox(x->hires_density, DIM, "    ");
    LOG_SUPER_DEBUG("  lowres_vx: ");
    debugSummarizeBox(x->lowres_vx, HII_DIM, "    ");
    LOG_SUPER_DEBUG("  lowres_vy: ");
    debugSummarizeBox(x->lowres_vy, HII_DIM, "    ");
    LOG_SUPER_DEBUG("  lowres_vz: ");
    debugSummarizeBox(x->lowres_vz, HII_DIM, "    ");
}
/* Log SUPER_DEBUG summaries of the PerturbedField density and velocity boxes
   (both at HII_DIM resolution). */
void debugSummarizePerturbField(struct PerturbedField *x, int HII_DIM){
    LOG_SUPER_DEBUG("Summary of PerturbedField:");
    LOG_SUPER_DEBUG("  density: ");
    debugSummarizeBox(x->density, HII_DIM, "    ");
    LOG_SUPER_DEBUG("  velocity: ");
    debugSummarizeBox(x->velocity, HII_DIM, "    ");
}
/**
 * Dump the first ten entries (print_first) and/or the box corners
 * (print_corners) of an InitialConditions struct to stdout.
 * print_pid prefixes each line with "<pid>\t"; HII_DIM sizes the corner dump.
 */
void inspectInitialConditions(struct InitialConditions *x, int print_pid, int print_corners, int print_first,
                              int HII_DIM){
    int i;
    char *pid = print_output_header(print_pid, "InitialConditions");

    if(print_first){
        printf("%s\tFirstRow: ",pid);

        /* BUG FIX: these three printf calls used a "%s" conversion with no
           matching argument (undefined behavior); pass pid explicitly. */
        printf("%s\t\tlowres_density: ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->lowres_density[i]);
        }
        printf("\n");

        printf("%s\t\tlowres_vx : ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->lowres_vx[i]);
        }
        printf("\n");

        printf("%s\t\tlowres_vx_2LPT: ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->lowres_vx_2LPT[i]);
        }
        printf("\n");
    }

    if(print_corners){
        printf("%s\tCorners: ",pid);

        printf("%s\t\tlowres_density: ",pid);
        print_corners_real(x->lowres_density, HII_DIM);

        printf("%s\t\tlowres_vx : ", pid);
        print_corners_real(x->lowres_vx, HII_DIM);

        printf("%s\t\tlowres_vx_2LPT: ", pid);
        print_corners_real(x->lowres_vx_2LPT, HII_DIM);
    }

    free(pid);  // BUG FIX: prefix buffer from print_output_header was leaked
}
/**
 * Dump the first ten entries (print_first) and/or the box corners
 * (print_corners) of a PerturbedField struct to stdout.
 * print_pid prefixes each line with "<pid>\t"; HII_DIM sizes the corner dump.
 */
void inspectPerturbedField(struct PerturbedField *x, int print_pid, int print_corners, int print_first,
                           int HII_DIM){
    int i;
    char *pid = print_output_header(print_pid, "PerturbedField");

    if(print_first){
        printf("%s\tFirstRow: \n",pid);

        printf("%s\t\tdensity: ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->density[i]);
        }
        printf("\n");

        printf("%s\t\tvelocity: ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->velocity[i]);
        }
        printf("\n");
    }

    if(print_corners){
        printf("%s\tCorners: \n",pid);

        printf("%s\t\tdensity: ",pid);
        print_corners_real(x->density, HII_DIM);

        printf("%s\t\tvelocity: ", pid);
        print_corners_real(x->velocity, HII_DIM);
    }

    free(pid);  // BUG FIX: prefix buffer from print_output_header was leaked
}
/**
 * Dump the first ten entries (print_first) and/or the box corners
 * (print_corners) of a TsBox struct (Ts, x_e, Tk boxes) to stdout.
 * print_pid prefixes each line with "<pid>\t"; HII_DIM sizes the corner dump.
 */
void inspectTsBox(struct TsBox *x, int print_pid, int print_corners, int print_first, int HII_DIM){
    int i;
    char *pid = print_output_header(print_pid, "TsBox");

    if(print_first){
        printf("%s\tFirstRow: ",pid);

        /* BUG FIX: these three printf calls used a "%s" conversion with no
           matching argument (undefined behavior); pass pid explicitly. */
        printf("%s\t\tTs_box : ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->Ts_box[i]);
        }
        printf("\n");

        printf("%s\t\tx_e_box: ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->x_e_box[i]);
        }
        printf("\n");

        printf("%s\t\tTk_box : ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->Tk_box[i]);
        }
        printf("\n");
    }

    if(print_corners){
        printf("%s\tCorners: ",pid);

        printf("%s\t\tTs_box : ",pid);
        print_corners_real(x->Ts_box, HII_DIM);

        printf("%s\t\tx_e_box: ", pid);
        print_corners_real(x->x_e_box, HII_DIM);

        printf("%s\t\tTk_box : ", pid);
        print_corners_real(x->Tk_box, HII_DIM);
    }

    free(pid);  // BUG FIX: prefix buffer from print_output_header was leaked
}
/**
 * Dump the first ten entries (print_first) and/or the box corners
 * (print_corners) of an IonizedBox struct (xH, Gamma12, z_re, dNrec boxes).
 * print_pid prefixes each line with "<pid>\t"; HII_DIM sizes the corner dump.
 */
void inspectIonizedBox(struct IonizedBox *x, int print_pid, int print_corners, int print_first, int HII_DIM){
    int i;
    char *pid = print_output_header(print_pid, "IonizedBox");

    if(print_first){
        printf("%s\tFirstRow: ",pid);

        /* BUG FIX: these four printf calls used a "%s" conversion with no
           matching argument (undefined behavior); pass pid explicitly. */
        printf("%s\t\txH_box : ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->xH_box[i]);
        }
        printf("\n");

        printf("%s\t\tGamma12_box: ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->Gamma12_box[i]);
        }
        printf("\n");

        printf("%s\t\tz_re_box : ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->z_re_box[i]);
        }
        printf("\n");

        printf("%s\t\tdNrec_box : ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->dNrec_box[i]);
        }
        printf("\n");
    }

    if(print_corners){
        printf("%s\tCorners: ",pid);

        printf("%s\t\txH_box : ",pid);
        print_corners_real(x->xH_box, HII_DIM);

        printf("%s\t\tGamma12_box: ", pid);
        print_corners_real(x->Gamma12_box, HII_DIM);

        printf("%s\t\tz_re_box : ", pid);
        print_corners_real(x->z_re_box, HII_DIM);

        printf("%s\t\tdNrec_box : ", pid);
        print_corners_real(x->dNrec_box, HII_DIM);
    }

    free(pid);  // BUG FIX: prefix buffer from print_output_header was leaked
}
/**
 * Dump the first ten entries (print_first) and/or the box corners
 * (print_corners) of a BrightnessTemp struct to stdout.
 * print_pid prefixes each line with "<pid>\t"; HII_DIM sizes the corner dump.
 */
void inspectBrightnessTemp(struct BrightnessTemp *x, int print_pid, int print_corners, int print_first, int HII_DIM){
    int i;
    char *pid = print_output_header(print_pid, "BrightnessTemp");

    if(print_first){
        printf("%s\tFirstRow: ",pid);

        /* BUG FIX: this printf used a "%s" conversion with no matching
           argument (undefined behavior); pass pid explicitly. */
        printf("%s\t\tbrightness_temp: ", pid);
        for(i=0;i<10;i++){
            printf("%f, ", x->brightness_temp[i]);
        }
        printf("\n");
    }

    if(print_corners){
        printf("%s\tCorners: ",pid);
        printf("%s\t\tbrightness_temp: ",pid);
        print_corners_real(x->brightness_temp, HII_DIM);
    }

    free(pid);  // BUG FIX: prefix buffer from print_output_header was leaked
}
/* Minimum halo mass (Msun) for atomic-line cooling at redshift z:
   virial temperature 1e4 K with mean molecular weight 0.59. */
double atomic_cooling_threshold(float z){
    return TtoM(z, 1e4, 0.59);
}
/* Minimum halo mass (Msun) for molecular cooling at redshift z:
   virial temperature 600 K with mean molecular weight 1.22. */
double molecular_cooling_threshold(float z){
    return TtoM(z, 600, 1.22);
}
/* Minimum halo mass for star formation in the presence of a Lyman-Werner
   background J_21_LW and a baryon-DM relative velocity vcb (km/s).
   Correction follows Schauer+20, fit jointly to LW feedback and relative
   velocities; they find a weaker LW-feedback effect than earlier work
   (Stacy+11, Greif+11, etc.) due to HII self shielding. */
double lyman_werner_threshold(float z, float J_21_LW, float vcb, struct AstroParams *astro_params){
    // no-feedback baseline: follows Visbal+15, taken as the optimal fit from
    // Fialkov+12, which was calibrated with the simulations of Stacy+11 and Greif+11
    double mcrit_noLW = 3.314e7 * pow( 1.+z, -1.5);
    // multiplicative boosts from LW flux and streaming velocity
    double f_LW = 1.0 + astro_params->A_LW * pow(J_21_LW, astro_params->BETA_LW);
    double f_vcb = pow(1.0 + astro_params->A_VCB * vcb/SIGMAVCB, astro_params->BETA_VCB);

    //    double mcrit_LW = mcrit_noLW * (1.0 + 10. * sqrt(J_21_LW)); //Eq. (12) in Schauer+20
    //    return pow(10.0, log10(mcrit_LW) + 0.416 * vcb/SIGMAVCB ); //vcb and sigmacb in km/s, from Eq. (9)

    return (mcrit_noLW * f_LW * f_vcb);
}
/* Halo mass threshold from photo-ionization feedback after local reionization
   at z_IN (Sobacchi & Mesinari 2013-style fit via the REION_SM13_* constants).
   Returns a negligible mass (1e-40) when the cell is not yet reionized
   (z_IN ~ 0). */
double reionization_feedback(float z, float Gamma_halo_HII, float z_IN){
    if (z_IN<=1e-19)
        return 1e-40;
    return REION_SM13_M0 * pow(HALO_BIAS * Gamma_halo_HII, REION_SM13_A) * pow((1.+z)/10, REION_SM13_B) *
        pow(1 - pow((1.+z)/(1.+z_IN), REION_SM13_C), REION_SM13_D);
}
/*
 The following functions exist purely to exercise the Try/Catch/Throw
 exception framework from the test suite.
*/
/* Unconditionally raise a PhotonConsError. */
void FunctionThatThrows(){
    Throw(PhotonConsError);
}
/* A simple function that catches a thrown error.
   Throws PhotonConsError either via FunctionThatThrows (sub_func nonzero) or
   directly, catches it, and returns the caught status (0 if nothing thrown —
   unreachable here since both branches throw). */
int SomethingThatCatches(bool sub_func){
    int status;
    Try{
        if(sub_func) FunctionThatThrows();
        else Throw(PhotonConsError);
    }
    Catch(status){
        return status;
    }
    return 0;
}
/* Exception-framework exercise with an output parameter.
   When pass is false, throws PhotonConsError (via FunctionThatThrows if
   sub_func, else directly), catches it, and returns the caught status without
   touching *result.  When pass is true, skips the throw, sets *result = 5.0,
   and returns 0. */
int FunctionThatCatches(bool sub_func, bool pass, double *result){
    int status;
    if(!pass){
        Try{
            if(sub_func) FunctionThatThrows();
            else Throw(PhotonConsError);
        }
        Catch(status){
            LOG_DEBUG("Caught the problem with status %d.", status);
            return status;
        }
    }
    *result = 5.0;
    return 0;
}
|
kt.c | /*!
\file
\brief The various k-truss decomposition routines
\date Started 6/3/2017
\author George
\version\verbatim $Id: cmdline.c 20946 2017-05-10 23:12:48Z karypis $ \endverbatim
*/
#include "kt.h"
#define hfun1(vi, vj, i, range) \
(((ssize_t)(((((ssize_t)vi)+5)^((ssize_t)vj)*(((ssize_t)vi>>32)+1)^((ssize_t)vj<<7)) + (i)*((1+((ssize_t)vi>>3)+1)^((ssize_t)vj<<5))))%range)
#ifndef DYNAMIC_CHUNK
#define DYNAMIC_CHUNK 16
#endif
/*************************************************************************/
/*! Determine the iperm for the key order using counting sort.
    Returns a new array iperm such that iperm[rank] is the index of the
    candidate with that rank when sorted by (non-negative) key in increasing
    order.  The caller owns the returned array.
*/
/*************************************************************************/
int32_t *gk_i32kvipermi(int32_t n, gk_i32kv_t *cand)
{
  int i, j, k, range;  /* NOTE: j and k are unused */
  int32_t *counts, *iperm;

  /* range = (max key) + 1; keys are assumed non-negative */
  for (range=0, i=0; i<n; i++) {
    if (cand[i].key > range)
      range = cand[i].key;
  }
  range++;

  /* histogram of the keys */
  counts = gk_i32smalloc(range+1, 0, "counts");
  for (i=0; i<n; i++)
    counts[cand[i].key]++;
  MAKECSR(i, range, counts);  /* prefix-sum the counts into start offsets */

  /* scatter candidate indices into rank order */
  iperm = gk_i32smalloc(n, 0, "iperm");
  for (i=0; i<n; i++)
    iperm[counts[cand[i].key]++] = i;

  gk_free((void **)&counts, LTERM);

  return iperm;
}
/*************************************************************************/
/*! Reorder the vertices in the graph in increasing-degree order and return
    the upper triangular part of the reordered graph, in which the adjacency
    lists are sorted in increasing order.  Also fills vault->perm
    (perm[old] => new) and vault->iperm (iperm[new] => old).
*/
/*************************************************************************/
gk_graph_t *kt_PreprocessAndExtractUpper(params_t *params, vault_t *vault)
{
  int32_t vi, vj, vk, nvtxs;
  ssize_t ej, ejend, nedges;  /* unused ei/eiend removed */
  ssize_t *xadj, *uxadj;
  int32_t *adjncy, *uadjncy, *perm=NULL, *iperm=NULL;
  gk_i32kv_t *cand=NULL;
  gk_graph_t *graph;

  nvtxs  = vault->graph->nvtxs;
  xadj   = vault->graph->xadj;
  adjncy = vault->graph->adjncy;

  /* rank the vertices by degree via counting sort */
  cand = gk_i32kvmalloc(nvtxs, "cand");
  for (vi=0; vi<nvtxs; vi++) {
    cand[vi].key = (int32_t)(xadj[vi+1]-xadj[vi]);
    cand[vi].val = vi;
  }

  perm  = vault->perm  = gk_i32smalloc(nvtxs, -1, "perm");  /* perm[old-vtx-num] => new-vtx-num */
  iperm = vault->iperm = gk_i32kvipermi(nvtxs, cand);       /* iperm[new-vtx-num] => old-vtx-num */
  for (vi=0; vi<nvtxs; vi++)
    perm[iperm[vi]] = vi;

  /* create the reordered/sorted upper triangular portion of the graph */
  graph = gk_graph_Create();
  graph->nvtxs  = nvtxs;
  graph->xadj   = uxadj   = gk_zmalloc(nvtxs+1, "uxadj");
  graph->adjncy = uadjncy = gk_i32malloc(10+(xadj[nvtxs]>>1), "uadjncy");

  uxadj[0] = nedges = 0;
  for (vi=0; vi<nvtxs; vi++) {
    vj = iperm[vi];
    for (ej=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++) {
      assert(adjncy[ej] < nvtxs);
      if ((vk = perm[adjncy[ej]]) > vi) /* keep only the upper part */
        uadjncy[nedges++] = vk;
    }
    uxadj[vi+1] = nedges;
    if (nedges-uxadj[vi] > 1)
      gk_i32sorti(nedges-uxadj[vi], uadjncy+uxadj[vi]); /* sort adjncy list */
  }
  /* BUG FIX: uxadj/xadj hold ssize_t (signed) values; %zu expects the
     unsigned size type, so use %zd instead */
  printf("Upper nedges: %zd out of %zd\n", uxadj[nvtxs], xadj[nvtxs]);

  gk_free((void **)&cand, LTERM);

  return graph;
}
/*************************************************************************/
/*! Creates the transpose of the upper-triangular graph with location
    offsets at +1 locations.  Each incoming edge is stored as the pair
    (source vertex, row-offset+1) — hence two slots per edge in tadjncy.
    The last edge of each list is skipped (ei < eiend-1).
    This is used for the JIK algorithm.
*/
/*************************************************************************/
gk_graph_t *kt_TransposeUforJIK(params_t *params, gk_graph_t *graph)
{
  int32_t vi, vj, nvtxs;
  ssize_t ei, eiend, nedges;  /* NOTE: nedges is unused */
  ssize_t *xadj, *txadj;
  int32_t *adjncy, *tadjncy;
  gk_graph_t *tgraph;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  tgraph = gk_graph_Create();
  tgraph->nvtxs  = nvtxs;
  tgraph->xadj   = txadj   = gk_zsmalloc(nvtxs+1, 0, "txadj");
  tgraph->adjncy = tadjncy = gk_i32malloc(2*(xadj[nvtxs]+1), "tadjncy");

  /* count: two slots per transposed edge (vertex + offset) */
  for (vi=0; vi<nvtxs; vi++) {
    if (xadj[vi+1]-xadj[vi] < 2)
      continue;
    for (ei=xadj[vi], eiend=xadj[vi+1]; ei<eiend-1; ei++)
      txadj[adjncy[ei]] += 2;
  }
  MAKECSR(vi, nvtxs, txadj);

  /* fill the transposed lists */
  for (vi=0; vi<nvtxs; vi++) {
    if (xadj[vi+1]-xadj[vi] < 2)
      continue;
    for (ei=xadj[vi], eiend=xadj[vi+1]; ei<eiend-1; ei++) {
      vj = adjncy[ei];
      tadjncy[txadj[vj]++] = vi;
      tadjncy[txadj[vj]++] = ei-xadj[vi]+1; /* row-offset */
    }
  }
  SHIFTCSR(vi, nvtxs, txadj);  /* undo the fill-phase pointer advance */

  return tgraph;
}
/*************************************************************************/
/*! Checks if the supports computed by the TC code is correct.
*/
/*************************************************************************/
/* Sanity-checks the per-edge triangle supports computed by the TC code:
   for every upper-triangular edge (ui,vj) the stored support must equal
   the size of the common neighborhood of its endpoints in the original
   graph. Aborts via GKASSERT on the first mismatch. */
void kt_CheckInitialSupport(params_t *params, vault_t *vault)
{
  int32_t nvtxs    = vault->graph->nvtxs;
  ssize_t *xadj    = vault->graph->xadj;
  int32_t *adjncy  = vault->graph->adjncy;
  ssize_t *uxadj   = vault->ugraph->xadj;
  int32_t *uadjncy = vault->ugraph->adjncy;
  int32_t *uadjwgt = vault->ugraph->iadjwgt;
  /* marker[w] == vi  <=>  w is adjacent to vi in the original graph */
  int32_t *marker = gk_i32smalloc(nvtxs, -1, "map");
  for (int32_t ui = 0; ui < nvtxs; ui++) {
    int32_t vi = vault->iperm[ui];   /* map back to original numbering */
    for (ssize_t e = xadj[vi]; e < xadj[vi+1]; e++)
      marker[adjncy[e]] = vi;
    for (ssize_t ue = uxadj[ui]; ue < uxadj[ui+1]; ue++) {
      int32_t vj = vault->iperm[uadjncy[ue]];
      int32_t nh = uadjwgt[ue];
      /* subtract one for every common neighbor of vi and vj */
      for (ssize_t e = xadj[vj]; e < xadj[vj+1]; e++) {
        if (marker[adjncy[e]] == vi)
          nh--;
      }
      GKASSERT(nh == 0);
    }
  }
  gk_free((void **)&marker, LTERM);
}
/*************************************************************************/
/*! Checks if the supports computed by the TC code is correct.
*/
/*************************************************************************/
void kt_CheckKTrussDecomposition(params_t *params, vault_t *vault)
{
  int32_t k, vi, vj, vk, nvtxs, knvtxs, nh;
  ssize_t ei, ej, nedges;
  ssize_t *xadj;
  int32_t *adjncy;
  int32_t *map;
  ktedge_t *ktedges;
  nvtxs   = vault->graph->nvtxs;
  nedges  = vault->nedges;
  ktedges = vault->ktedges;
  /* for every truss level k, rebuild the subgraph induced by the edges
     whose recorded level is >= k and verify each surviving edge is in
     at least k triangles of that subgraph */
  for (k=1; k<=vault->ktmax; k++) {
    /* count degrees of the level->=k subgraph (both directions) */
    xadj = gk_zsmalloc(nvtxs+1, 0, "xadj");
    for (ei=0; ei<nedges; ei++) {
      if (ktedges[ei].k >= k) {
        xadj[ktedges[ei].vi]++;
        xadj[ktedges[ei].vj]++;
      }
    }
    for (knvtxs=0, vi=0; vi<nvtxs; vi++)
      knvtxs += (xadj[vi] > 0 ? 1 : 0);   /* #vertices still present at level k */
    MAKECSR(vi, nvtxs, xadj);
    /* populate the symmetric adjacency lists */
    adjncy = gk_i32malloc(xadj[nvtxs], "adjncy");
    for (ei=0; ei<nedges; ei++) {
      if (ktedges[ei].k >= k) {
        adjncy[xadj[ktedges[ei].vi]++] = ktedges[ei].vj;
        adjncy[xadj[ktedges[ei].vj]++] = ktedges[ei].vi;
      }
    }
    SHIFTCSR(vi, nvtxs, xadj);
    /* marker-based support recomputation: map[w]==vi marks w in N(vi) */
    map = gk_i32smalloc(nvtxs, -1, "map");
    for (vi=0; vi<nvtxs; vi++) {
      for (ei=xadj[vi]; ei<xadj[vi+1]; ei++)
        map[adjncy[ei]] = vi;
      for (ei=xadj[vi]; ei<xadj[vi+1]; ei++) {
        vj = adjncy[ei];
        for (nh=0, ej=xadj[vj]; ej<xadj[vj+1]; ej++)
          nh += (map[adjncy[ej]] == vi ? 1 : 0);
        GKASSERT(nh >= k);   /* k-truss property (reported truss value is k+2) */
      }
    }
    printf("k-truss: %4d, nvtxs: %7d, nedges: %8zu\n", k+2, knvtxs, xadj[nvtxs]);
    gk_free((void **)&xadj, &adjncy, &map, LTERM);
  }
}
/*************************************************************************/
/*! Takes the sups[] array associated with the edges and creates the
ktedges information in the vault.
*/
/*************************************************************************/
void kt_Sups2KTEdges(params_t *params, vault_t *vault, int32_t ktmax, int32_t *sups)
{
  int32_t vi, nvtxs;
  ssize_t ei, eiend, ej, nedges;
  ssize_t *xadj;
  int32_t *adjncy, *adjwgt;
  if (params->outfile == NULL)
    return;   /* no output requested; skip the (large) edge-list build */
  nvtxs  = vault->ugraph->nvtxs;
  xadj   = vault->ugraph->xadj;
  adjncy = vault->ugraph->adjncy;
  adjwgt = vault->ugraph->iadjwgt;
  vault->nedges  = xadj[nvtxs];
  vault->ktmax   = ktmax;
  vault->ktedges = (ktedge_t *)gk_malloc(xadj[nvtxs]*sizeof(ktedge_t), "ktedges");
  /* ej walks ALL upper-triangular edges; nedges walks only the edges with
     positive initial support — the same compaction order used when sups[]
     was built in kt_serial(), so the two stay in lockstep */
  for (ej=0, nedges=0, vi=0; vi<nvtxs; vi++) {
    for (ei=xadj[vi], eiend=xadj[vi+1]; ei<eiend; ei++, ej++) {
      /* report endpoints in the ORIGINAL vertex numbering, smaller first */
      vault->ktedges[ej].vi = gk_min(vault->iperm[vi], vault->iperm[adjncy[ei]]);
      vault->ktedges[ej].vj = gk_max(vault->iperm[vi], vault->iperm[adjncy[ei]]);
      if (adjwgt[ei] > 0)
        vault->ktedges[ej].k = -sups[nedges++] + 2;   /* peeling stored -k in sups[] */
      else
        vault->ktedges[ej].k = 2;   /* edge in no triangle => trivially a 2-truss edge */
    }
  }
}
/*************************************************************************/
/*! The hash-map-based edge-triangle-support counting routine that uses
the JIK triangle enumeration scheme.
This is the mapjikv2 tc version.
*/
/*************************************************************************/
int64_t kt_ComputeEdgeSupport(params_t *params, vault_t *vault)
{
  int32_t vi, vj, vk, vl, nvtxs, nlocal;
  ssize_t ei, eiend, ej, ejstart, ejend;
  int64_t ntriangles, ntriangles2;
  ssize_t *xadj, *txadj;
  int32_t *adjncy, *tadjncy, *adjwgt;
  int32_t l, tnc, nc, hmsize, tlsize, tlstart, *hmap, *tmap;
  gk_startwctimer(vault->timer_2);
  /* ugraph: upper-triangular adjacency (sorted rows); lgraph: its
     transpose of (vertex, row-offset) pairs built by kt_TransposeUforJIK */
  nvtxs   = vault->ugraph->nvtxs;
  xadj    = vault->ugraph->xadj;
  adjncy  = vault->ugraph->adjncy;
  adjwgt  = vault->ugraph->iadjwgt;   /* per-edge triangle supports (output) */
  txadj   = vault->lgraph->xadj;
  tadjncy = vault->lgraph->adjncy;
  /* determine the size of the hash-map and convert it into a format
     that is compatible with a bitwise AND operation */
  for (hmsize=0, vi=0; vi<nvtxs; vi++)
    hmsize = gk_max(hmsize, (int32_t)(xadj[vi+1]-xadj[vi]));
  for (l=1; hmsize>(1<<l); l++);
  hmsize = (1<<(l+4))-1;   /* power-of-two-minus-one => usable as an AND mask */
  hmap = gk_i32smalloc(hmsize+1, -1, "hmap");
  printf("& compatible maximum hmsize: %"PRId32"\n", hmsize);
  /* determine the size of the tail-map and allocate memory for it */
  /* heuristic: the highest-numbered vertices are dense enough that a
     direct-indexed map (tmap) beats hashing; find where that tail begins */
  for (vi=(nvtxs>>2); vi<nvtxs; vi++) {
    if ((txadj[vi+1]-txadj[vi])<<9 > vi)
      break;
    if ((xadj[vi+1]-xadj[vi])<<4 > nvtxs-vi)
      break;
  }
  tlsize  = nvtxs - vi + 100;
  tlstart = nvtxs-tlsize;
  tmap = gk_i32smalloc(tlsize, -1, "tmap");
  tmap -= tlstart; /* make indexing simpler */
  printf("tlsize: %"PRId32"\n", tlsize);
  /* start counting triangles */
  if (params->dbglvl&1)
    gk_startwctimer(vault->timer_4);
  /* use a combination of hmap and tmap */
  ntriangles = 0;
  hmsize = 0;   /* from here on hmsize is the CURRENT working mask, grown on demand */
  tnc = 0;      /* cumulative number of hash collisions (for diagnostics) */
  for (vj=1; vj<tlstart; vj++) {
    if (xadj[vj+1] == xadj[vj] || txadj[vj+1] == txadj[vj])
      continue;   /* vj cannot be the middle vertex of any triangle */
    /* if needed, increase the working hmsize */
    if ((xadj[vj+1]-xadj[vj])<<3 > 1 + (hmsize>>4) + (hmsize>>1)) {
      hmsize = xadj[vj+1]-xadj[vj];
      for (l=1; hmsize>(1<<l); l++);
      hmsize = (1<<(l+4))-1;
      if (params->dbglvl&1) {
        gk_stopwctimer(vault->timer_4);
        printf("vj: %9d tlstart: %d degree: %5zu %7zu hmsize: %6d tnc: %7d time: %5.2lfs\n",
            vj, tlstart, xadj[vj+1]-xadj[vj], txadj[vj+1]-txadj[vj],
            hmsize, tnc, gk_getwctimer(vault->timer_4));
        tnc = 0;
        gk_clearwctimer(vault->timer_4);
        gk_startwctimer(vault->timer_4);
      }
    }
    /* hash Adj(vj) using hmap for the front and tmap for the last tlsize indices */
    for (nc=0, ej=ejstart=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++) {
      if ((vk = adjncy[ej]) >= tlstart)
        break;   /* rows are sorted; the remaining neighbors go into tmap */
      for (l=(vk&hmsize); hmap[l]!=-1; l=((l+1)&hmsize), nc++);   /* linear probing */
      hmap[l] = ej-ejstart;   /* store the offset of vk within Adj(vj) */
    }
    for (; ej<ejend; ej++)
      tmap[adjncy[ej]] = ej-ejstart;
    tnc += nc;
    /* find intersections */
    if (nc > 0) { /* we had collisions */
      for (ej=txadj[vj], ejend=txadj[vj+1]; ej<ejend; ej+=2) {
        vi = tadjncy[ej];   /* vi<vj with edge (vi,vj); tadjncy[ej+1] = row offset */
        for (nlocal=0, ei=xadj[vi]+tadjncy[ej+1], eiend=xadj[vi+1]; ei<eiend; ei++) {
          if ((vk = adjncy[ei]) >= tlstart)
            break;
          l = vk&hmsize;
          if (hmap[l] == -1)
            continue;
          if (adjncy[ejstart+hmap[l]] == vk) {   /* hit on the first probe */
            adjwgt[ei]++;
            adjwgt[ejstart+hmap[l]]++;
            nlocal++;
            continue;
          }
          /* probe the rest of the cluster */
          for (l=((l+1)&hmsize); hmap[l]!=-1 && adjncy[ejstart+hmap[l]]!=vk; l=((l+1)&hmsize));
          if (hmap[l]!=-1 && adjncy[ejstart+hmap[l]] == vk) {
            adjwgt[ei]++;
            adjwgt[ejstart+hmap[l]]++;
            nlocal++;
          }
        }
        /* tail of Adj(vi): direct lookups via tmap */
        for (; ei<eiend; ei++) {
          if (tmap[adjncy[ei]] != -1) {
            assert(adjncy[ejstart+tmap[adjncy[ei]]] == adjncy[ei]);
            adjwgt[ei]++;
            adjwgt[ejstart+tmap[adjncy[ei]]]++;
            nlocal++;
          }
        }
        if (nlocal > 0) {
          ntriangles += nlocal;
          assert(adjncy[xadj[vi]+tadjncy[ej+1]-1] == vj);
          adjwgt[xadj[vi]+tadjncy[ej+1]-1] += nlocal;   /* credit the (vi,vj) edge too */
        }
      }
      /* reset hmap/tmap */
      for (ej=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++) {
        if ((vk = adjncy[ej]) >= tlstart)
          break;
        for (l=(vk&hmsize); hmap[l]==-1 || adjncy[ejstart+hmap[l]]!=vk; l=((l+1)&hmsize));
        hmap[l] = -1;
      }
      for (; ej<ejend; ej++)
        tmap[adjncy[ej]] = -1;
    }
    else { /* there were no collisons */
      /* collision-free fast path: a single probe suffices for every lookup */
      for (ej=txadj[vj], ejend=txadj[vj+1]; ej<ejend; ej+=2) {
        vi = tadjncy[ej];
        for (nlocal=0, ei=xadj[vi]+tadjncy[ej+1], eiend=xadj[vi+1]; ei<eiend; ei++) {
          if ((vk = adjncy[ei]) >= tlstart)
            break;
          if (hmap[vk&hmsize]!=-1 && adjncy[ejstart+hmap[vk&hmsize]] == vk) {
            adjwgt[ei]++;
            adjwgt[ejstart+hmap[vk&hmsize]]++;
            nlocal++;
          }
        }
        for (; ei<eiend; ei++) {
          if (tmap[adjncy[ei]] != -1) {
            assert(adjncy[ejstart+tmap[adjncy[ei]]] == adjncy[ei]);
            adjwgt[ei]++;
            adjwgt[ejstart+tmap[adjncy[ei]]]++;
            nlocal++;
          }
        }
        if (nlocal > 0) {
          ntriangles += nlocal;
          assert(adjncy[xadj[vi]+tadjncy[ej+1]-1] == vj);
          adjwgt[xadj[vi]+tadjncy[ej+1]-1] += nlocal;
        }
      }
      /* reset hmap/tmap */
      for (ej=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++) {
        if ((vk = adjncy[ej]) >= tlstart)
          break;
        hmap[vk&hmsize] = -1;
      }
      for (; ej<ejend; ej++)
        tmap[adjncy[ej]] = -1;
    }
  }
  printf("ntriangles: %"PRId64"\n", ntriangles);
  if (params->dbglvl&1) {
    gk_stopwctimer(vault->timer_4);
    printf("vj: %9d tlstart: %d degree: %5zu %7zu hmsize: %6d tnc: %7d time: %5.2lfs\n",
        vj, tlstart, xadj[vj+1]-xadj[vj], txadj[vj+1]-txadj[vj],
        hmsize, tnc, gk_getwctimer(vault->timer_4));
    tnc = 0;
    gk_clearwctimer(vault->timer_4);
    gk_startwctimer(vault->timer_4);
  }
  /* use tmap for the last tlsize rows */
  for (; vj<nvtxs; vj++) {
    if (1 || xadj[vj+1]-xadj[vj] < nvtxs-vj-1) {   /* dense-row branch disabled (see TODO) */
      /* hash Adj(vj) */
      for (ej=ejstart=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++)
        tmap[adjncy[ej]] = ej-ejstart;
      /* find intersections */
      for (ej=txadj[vj], ejend=txadj[vj+1]; ej<ejend; ej+=2) {
        vi = tadjncy[ej];
        for (nlocal=0, ei=xadj[vi]+tadjncy[ej+1], eiend=xadj[vi+1]; ei<eiend; ei++) {
          if (tmap[adjncy[ei]] != -1) {
            adjwgt[ei]++;
            adjwgt[ejstart+tmap[adjncy[ei]]]++;
            nlocal++;
          }
        }
        if (nlocal > 0) {
          ntriangles += nlocal;
          assert(adjncy[xadj[vi]+tadjncy[ej+1]-1] == vj);
          adjwgt[xadj[vi]+tadjncy[ej+1]-1] += nlocal;
        }
      }
      /* reset tmap */
      for (ej=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++)
        tmap[adjncy[ej]] = -1;
    }
    else { /* the row is dense */ /* TODO: This has not been updated */
      tnc++;
      /* find intersections */
      for (nlocal=0, ej=txadj[vj], ejend=txadj[vj+1]; ej<ejend; ej+=2) {
        vi = tadjncy[ej];
        nlocal += xadj[vi+1]-xadj[vi]-tadjncy[ej+1];
      }
      ntriangles += nlocal;
    }
  }
  gk_stopwctimer(vault->timer_2);
  if (params->dbglvl&1) {
    gk_stopwctimer(vault->timer_4);
    vj = nvtxs-2;
    printf("vj: %9d tlstart: %d degree: %5zu %7zu hmsize: %6d tnc: %7d time: %5.2lfs\n",
        vj, tlstart, xadj[vj+1]-xadj[vj], txadj[vj+1]-txadj[vj],
        hmsize, tnc, gk_getwctimer(vault->timer_4));
    /* every triangle contributes 3 support increments, so sum(adjwgt)==3*ntriangles */
    for (ntriangles2=0, ei=0; ei<xadj[nvtxs]; ei++)
      ntriangles2 += adjwgt[ei];
    printf("Sanity check: ntriangles: %"PRId64" %"PRId64" %"PRId64"\n", ntriangles, ntriangles2/3, ntriangles2%3);
  }
  tmap += tlstart;   /* undo the offset before freeing */
  gk_free((void **)&hmap, &tmap, LTERM);
  return ntriangles;
}
/*************************************************************************/
/*! This is the baseline serial version of k-truss decomposition.
*/
/*************************************************************************/
int64_t kt_serial(params_t *params, vault_t *vault)
{
  /* one record per surviving (positive-support) edge; eij/eji index the
     aii[] slots of the edge inside vi's and vj's adjacency lists */
  struct edge_s {
    int32_t vi, vj;
    ssize_t eij, eji;
  } *edges;
  /* skip-list adjacency entry: inc/dec are the distances to the next /
     previous live entry; they grow as neighboring entries get deleted */
  struct aii_s {
    int32_t vj;
    int32_t inc, dec;
  } *aii;
  /* live [start,end) window of each vertex's adjacency list */
  struct xaii_s {
    int64_t start, end;
  } *xaii;
  /* node of the support-bucket doubly-linked lists */
  struct slist_s {
    ssize_t neid, peid;
    int32_t sup;
  } *slist;
  int32_t vi, vik, vj, vjk, vk, nvtxs, nltriangles, sup;
  ssize_t ti, ei, eistart, eiend, ej, ejstart, ejend;
  int64_t nedges, nleft, ntriangles;
  ssize_t *xadj;
  int32_t *adjncy, *adjwgt;
  int32_t k, nsups, *sups;
  ssize_t *ids, *shead;
  ssize_t nupdates, nmaxupdates, *updindices;
  double timer_currk = 0.;
  gk_startwctimer(vault->timer_tcsetup);
  vault->ugraph = kt_PreprocessAndExtractUpper(params, vault);
  vault->lgraph = kt_TransposeUforJIK(params, vault->ugraph);
  nvtxs  = vault->ugraph->nvtxs;
  xadj   = vault->ugraph->xadj;
  adjncy = vault->ugraph->adjncy;
  /* where the support values will be stored */
  adjwgt = vault->ugraph->iadjwgt = gk_i32smalloc(xadj[nvtxs], 0, "adjwgt");
  gk_stopwctimer(vault->timer_tcsetup);
  gk_startwctimer(vault->timer_esupport);
  /* NOTE(review): despite the function's "serial" name, support counting
     uses the parallel routine; only the peeling below is serial. */
  ntriangles = kt_ComputeEdgeSupportPar(params, vault);
  gk_stopwctimer(vault->timer_esupport);
#if VERBOSE
  printf("supports:\n");
  for(int v=0; v < nvtxs; ++v) {
    for(ssize_t e=xadj[v]; e < xadj[v+1]; ++e) {
      printf("(%2d, %2d) perm[%2d, %2d] = %d\n",
          v+1, adjncy[e]+1,
          vault->iperm[v]+1, vault->iperm[adjncy[e]]+1,
          adjwgt[e]);
    }
  }
#endif
  gk_startwctimer(vault->timer_ktsetup);
  /* determine the number of edges with non-zero support */
  for (nedges=0, ei=0, eiend=xadj[nvtxs]; ei<eiend; ei++) {
    if (adjwgt[ei] > 0)
      nedges++;
  }
  /* allocate memory for the adjancency lists, which in addition to the
     adjancent vertex it will store the decrement (for skip-list) and
     the ID for priority queue */
  xaii  = (struct xaii_s *)gk_malloc((nvtxs+1)*sizeof(struct xaii_s), "xaii");
  aii   = (struct aii_s *)gk_malloc((2*nedges+1)*sizeof(struct aii_s), "aii");
  edges = (struct edge_s *)gk_malloc((nedges+1)*sizeof(struct edge_s), "edges");
  sups  = gk_i32malloc(nedges, "sups");
  ids   = gk_zmalloc(2*nedges+1, "ids");
  for (vi=0; vi<nvtxs; vi++)
    xaii[vi].start = 0;
  /* determine sizes */
  /* .start temporarily holds the (symmetric) degree of each vertex */
  for (nedges=0, vi=0; vi<nvtxs; vi++) {
    for (ei=xadj[vi], eiend=xadj[vi+1]; ei<eiend; ei++) {
      if (adjwgt[ei] > 0) {
        xaii[vi].start++;
        xaii[adjncy[ei]].start++;
        edges[nedges].vi = vi;
        edges[nedges].vj = adjncy[ei];
        sups[nedges] = adjwgt[ei];
        nedges++;
      }
    }
  }
  /* the MAKECSR equivalent */
  for (vi=1; vi<nvtxs; vi++)
    xaii[vi].start += xaii[vi-1].start;
  for (vi=nvtxs; vi>0; vi--)
    xaii[vi].start = xaii[vi-1].start;
  xaii[0].start = 0;
  /* populate it into two steps to ensure that the sorted order is maintained */
  /* pass 1: the vj-side (eji) entries — sources vi arrive in increasing order */
  for (nedges=0, vi=0; vi<nvtxs; vi++) {
    for (ei=xadj[vi], eiend=xadj[vi+1]; ei<eiend; ei++) {
      if (adjwgt[ei] > 0) {
        vj = adjncy[ei];
        aii[xaii[vj].start].vj  = vi;
        aii[xaii[vj].start].inc = 1;
        aii[xaii[vj].start].dec = 1;
        ids[xaii[vj].start] = nedges;   /* aii slot -> compacted edge id */
        edges[nedges].eji = xaii[vj].start++;
        nedges++;
      }
    }
  }
  /* pass 2: the vi-side (eij) entries */
  for (nedges=0, vi=0; vi<nvtxs; vi++) {
    for (ei=xadj[vi], eiend=xadj[vi+1]; ei<eiend; ei++) {
      if (adjwgt[ei] > 0) {
        aii[xaii[vi].start].vj  = adjncy[ei];
        aii[xaii[vi].start].inc = 1;
        aii[xaii[vi].start].dec = 1;
        ids[xaii[vi].start] = nedges;
        edges[nedges].eij = xaii[vi].start++;
        nedges++;
      }
    }
  }
  /* the SHIFTCSR equivalent */
  for (vi=nvtxs; vi>0; vi--)
    xaii[vi].start = xaii[vi-1].start;
  xaii[0].start = 0;
  /* record the end in xaii[vi] and from now own, you will be using that */
  for (vi=0; vi<nvtxs; vi++)
    xaii[vi].end = xaii[vi+1].start;
  /* setup the support buckets and all associated information */
  nsups = gk_i32max(nedges, sups, 1) + 1;
  printf("nsups: %d\n", nsups);
  /* the heads and "link list" that form the support buckets */
  shead = gk_zsmalloc(nsups, -1, "shead");
  slist = (struct slist_s *)gk_malloc((nedges+1)*sizeof(struct slist_s), "slist");
  slist++; /* this is to allow slist[-1] to be valid */
  for (ei=0; ei<nedges; ei++) {
    slist[ei].sup  = sups[ei];
    slist[ei].peid = -1;
    slist[ei].neid = shead[sups[ei]];
    if (shead[sups[ei]] != -1)
      slist[shead[sups[ei]]].peid = ei;
    shead[sups[ei]] = ei;
  }
  nmaxupdates = nedges + 2*nvtxs;
  updindices = gk_zmalloc(nmaxupdates, "updindices");
  gk_stopwctimer(vault->timer_ktsetup);
  printf("#triangles before peeling: %"PRId64"\n", ntriangles);
  ntriangles = 0;
  nleft = nedges;
  gk_startwctimer(vault->timer_ktpeeling);
  /* get into the k-truss enumeration loop */
  for (k=1; k<nsups && nleft>0; k++) {
    nltriangles = 0;
    gk_clearwctimer(timer_currk);
    gk_startwctimer(timer_currk);
BACK:
    nupdates = 0;
    /* peel every edge currently in the support-k bucket */
    for (ti=shead[k]; ti!=-1; ti=slist[ti].neid) {
      if (nupdates + 2*nvtxs > nmaxupdates)
        break;   /* updindices[] nearly full: flush updates, then resume via BACK */
      nleft--;
      vi = edges[ti].vi;
      vj = edges[ti].vj;
#if 0
      printf("(%d %d) = %d\n", vi+1, vj+1, sups[ti]);
#endif
      /* remove the edge from both adjacency lists */
      ei = edges[ti].eij;
      if (ei == xaii[vi].start)
        xaii[vi].start += aii[ei].inc;
      else
        aii[ei-aii[ei].dec].inc += aii[ei].inc;
      if (ei == xaii[vi].end-1)
        xaii[vi].end -= aii[ei].dec;
      else
        aii[ei+aii[ei].inc].dec += aii[ei].dec;
      ej = edges[ti].eji;
      if (ej == xaii[vj].start)
        xaii[vj].start += aii[ej].inc;
      else
        aii[ej-aii[ej].dec].inc += aii[ej].inc;
      if (ej == xaii[vj].end-1)
        xaii[vj].end -= aii[ej].dec;
      else
        aii[ej+aii[ej].inc].dec += aii[ej].dec;
      if (sups[ti] > 0) {
        sup = sups[ti];
        nltriangles += sup;
        /* walk both (sorted) live lists backwards looking for common neighbors */
        ei      = xaii[vi].end-1;
        eistart = xaii[vi].start;
        vik     = aii[ei].vj;
        ej      = xaii[vj].end-1;
        ejstart = xaii[vj].start;
        vjk     = aii[ej].vj;
        /* decrease the support of the intersection */
        while (ei >= eistart && ej >= ejstart) {
          if (vik > vjk) {
            ei -= aii[ei].dec;
            vik = aii[ei].vj;
          }
          else if (vjk > vik) {
            ej -= aii[ej].dec;
            vjk = aii[ej].vj;
          }
          else {
            /* common neighbor: record both edges for deferred bucket moves */
            updindices[nupdates++] = ids[ei];
            updindices[nupdates++] = ids[ej];
            sups[ids[ei]]--;
            ei -= aii[ei].dec;
            vik = aii[ei].vj;
            sups[ids[ej]]--;
            ej -= aii[ej].dec;
            vjk = aii[ej].vj;
            if (--sup == 0)
              break;   /* all of this edge's triangles have been accounted for */
          }
        }
        GKASSERT(sup == 0);
      }
      sups[ti] = -k; /* this is used for encoding the maximal value of k of that edge */
    }
    /* update the shead[k] information, for the subsequent updates */
    shead[k] = ti;
    slist[ti].peid = -1;   /* safe even when ti==-1 thanks to the slist++ shift */
    /* add up sups[:] */
    int64_t total_sup = 0;
    #pragma omp parallel for schedule(static) reduction(+:total_sup)
    for(int64_t e = 0; e < nedges; ++e) {
      if(sups[e] >= 0) {
        total_sup += sups[e];
      }
    }
#if VERBOSE
    printf(" edges-left: %7"PRId64" (%5.2f%%), total-support: %7"PRId64"\n",
        nleft, 100. * (double)nleft / (double)nedges, total_sup);
#endif
    if (nupdates > 0) {
      /* move every touched edge to the bucket of its new support value */
      gk_startwctimer(vault->timer_4);
      for (ei=0; ei<nupdates; ei++) {
        ti = updindices[ei];
        if (sups[ti] < 0 || sups[ti] == slist[ti].sup)
          continue; /* we have already deleted or updated this */
        /* remove ti from its current list */
        sup = (slist[ti].sup <= k ? k : slist[ti].sup); /* see the comment in the "add" */
        if (shead[sup] != ti) { /* if ti was not the head */
          slist[slist[ti].peid].neid = slist[ti].neid;
          slist[slist[ti].neid].peid = slist[ti].peid;
        }
        else {
          shead[sup] = slist[ti].neid;
          slist[slist[ti].neid].peid = -1;
        }
        /* add ti to the head of the new list */
        sup = (sups[ti] <= k ? k : sups[ti]); /* put all the <k support into the support
                                                 list that we are currently operating on */
        slist[ti].sup  = sups[ti];
        slist[ti].peid = -1;
        slist[ti].neid = shead[sup];
        slist[shead[sup]].peid = ti;
        shead[sup] = ti;
      }
      gk_stopwctimer(vault->timer_4);
      goto BACK;   /* bucket k may have received new edges; re-scan it */
    }
    gk_stopwctimer(timer_currk);
    /* add up sups[:] */
    total_sup = 0;
    #pragma omp parallel for schedule(static) reduction(+:total_sup)
    for(int64_t e = 0; e < nedges; ++e) {
      if(sups[e] >= 0) {
        total_sup += sups[e];
      }
    }
    printf("k: %7d; edges-left: %7"PRId64" (%5.2f%%), total-support: %7"PRId64", "
           "nltriangles: %7d, time (s): %6.3f\n",
        k+2, nleft, 100. * (double)nleft / (double)nedges, total_sup,
        nltriangles, timer_currk);
    gk_clearwctimer(timer_currk);
    ntriangles += nltriangles;
  }
  gk_stopwctimer(vault->timer_ktpeeling);
  printf("#triangles after peeling: %"PRId64"\n", ntriangles);
  /* create the output of the decomposition */
  kt_Sups2KTEdges(params, vault, k-1, sups);
  slist--;   /* undo the +1 shift before freeing */
  gk_free((void **)&edges, &aii, &xaii, &ids, &sups, &shead, &slist, &updindices, LTERM);
  return ntriangles;
}
/*************************************************************************/
/*! The hash-map-based edge-triangle-support counting routine that uses
the JIK triangle enumeration scheme.
This is the mapjikv2 tc version.
*/
/*************************************************************************/
int64_t kt_ComputeEdgeSupportPar(params_t *params, vault_t *vault)
{
  int32_t vi, vj, vk, vl, nvtxs, nlocal;
  ssize_t ei, eiend, ej, ejstart, ejend;
  int64_t ntriangles, ntriangles2;
  ssize_t *xadj, *txadj;
  int32_t *adjncy, *tadjncy, *adjwgt;
  int32_t l, tnc, nc, hmsize, tlsize, tlstart;
  gk_startwctimer(vault->timer_2);
  nvtxs   = vault->ugraph->nvtxs;
  xadj    = vault->ugraph->xadj;
  adjncy  = vault->ugraph->adjncy;
  adjwgt  = vault->ugraph->iadjwgt;   /* per-edge triangle supports (output) */
  txadj   = vault->lgraph->xadj;
  tadjncy = vault->lgraph->adjncy;
  /* determine the size of the hash-map and convert it into a format
     that is compatible with a bitwise AND operation */
  for (hmsize=0, vi=0; vi<nvtxs; vi++)
    hmsize = gk_max(hmsize, (int32_t)(xadj[vi+1]-xadj[vi]));
  for (l=1; hmsize>(1<<l); l++);
  hmsize = (1<<(l+4))-1;   /* power-of-two-minus-one => usable as an AND mask */
  printf("& compatible maximum hmsize: %"PRId32"\n", hmsize);
  /* determine the size of the tail-map and allocate memory for it */
  for (vi=(nvtxs>>2); vi<nvtxs; vi++) {
    if ((txadj[vi+1]-txadj[vi])<<9 > vi)
      break;
    if ((xadj[vi+1]-xadj[vi])<<4 > nvtxs-vi)
      break;
  }
  tlsize  = nvtxs - vi + 100;
  tlstart = nvtxs-tlsize;
  printf("tlsize: %"PRId32"\n", tlsize);
  printf("tlstart: %"PRId32"\n", tlstart);
  /* start counting triangles */
  if (params->dbglvl&1)
    gk_startwctimer(vault->timer_4);
  /* use a combination of hmap and tmap; each thread owns private maps,
     but adjwgt[] is shared, so all support increments use omp atomic */
  ntriangles = 0;
  tnc = 0;
  #pragma omp parallel default(none) shared(xadj, txadj, hmsize, params, tlstart, tlsize, adjncy, tadjncy, adjwgt) private(nc, ej, ejstart, ejend, l, nlocal, vi, vk, ei, eiend) reduction(+:ntriangles) reduction(+:tnc)
  {
    int32_t *hmap = gk_i32smalloc(hmsize+1, -1, "hmap");
    int32_t *tmap = gk_i32smalloc(tlsize, -1, "tmap");
    tmap -= tlstart; /* make indexing simpler */
    int32_t hmsizel = 0;   /* per-thread working hash mask, grown on demand */
    #pragma omp for schedule(dynamic, DYNAMIC_CHUNK)
    for (vj=1; vj<tlstart; vj++) {
      if (xadj[vj+1] == xadj[vj] || txadj[vj+1] == txadj[vj])
        continue;   /* vj cannot be the middle vertex of any triangle */
      /* if needed, increase the working hmsize */
      if ((xadj[vj+1]-xadj[vj])<<3 > 1 + (hmsizel>>4) + (hmsizel>>1)) {
        hmsizel = xadj[vj+1]-xadj[vj];
        for (l=1; hmsizel>(1<<l); l++);
        hmsizel = (1<<(l+4))-1;
      }
      /* hash Adj(vj) using hmap for the front and tmap for the last tlsize indices */
      for (nc=0, ej=ejstart=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++) {
        if ((vk = adjncy[ej]) >= tlstart)
          break;   /* rows are sorted; the remaining neighbors go into tmap */
        for (l=(vk&hmsizel); hmap[l]!=-1; l=((l+1)&hmsizel), nc++);   /* linear probing */
        hmap[l] = ej-ejstart;
      }
      for (; ej<ejend; ej++)
        tmap[adjncy[ej]] = ej-ejstart;
      /* find intersections */
      if (nc > 0) { /* we had collisions */
        for (ej=txadj[vj], ejend=txadj[vj+1]; ej<ejend; ej+=2) {
          vi = tadjncy[ej];   /* vi<vj with edge (vi,vj); tadjncy[ej+1] = row offset */
          for (nlocal=0, ei=xadj[vi]+tadjncy[ej+1], eiend=xadj[vi+1]; ei<eiend; ei++) {
            if ((vk = adjncy[ei]) >= tlstart)
              break;
            l = vk&hmsizel;
            if (hmap[l] == -1)
              continue;
            if (adjncy[ejstart+hmap[l]] == vk) {   /* hit on the first probe */
              #pragma omp atomic
              adjwgt[ei]++;
              #pragma omp atomic
              adjwgt[ejstart+hmap[l]]++;
              nlocal++;
              continue;
            }
            for (l=((l+1)&hmsizel); hmap[l]!=-1 && adjncy[ejstart+hmap[l]]!=vk; l=((l+1)&hmsizel));
            if (hmap[l]!=-1 && adjncy[ejstart+hmap[l]] == vk) {
              #pragma omp atomic
              adjwgt[ei]++;
              #pragma omp atomic
              adjwgt[ejstart+hmap[l]]++;
              nlocal++;
            }
          }
          /* tail of Adj(vi): direct lookups via tmap */
          for (; ei<eiend; ei++) {
            if (tmap[adjncy[ei]] != -1) {
              assert(adjncy[ejstart+tmap[adjncy[ei]]] == adjncy[ei]);
              #pragma omp atomic
              adjwgt[ei]++;
              #pragma omp atomic
              adjwgt[ejstart+tmap[adjncy[ei]]]++;
              nlocal++;
            }
          }
          if (nlocal > 0) {
            ntriangles += nlocal;
            assert(adjncy[xadj[vi]+tadjncy[ej+1]-1] == vj);
            #pragma omp atomic
            adjwgt[xadj[vi]+tadjncy[ej+1]-1] += nlocal;   /* credit the (vi,vj) edge too */
          }
        }
        /* reset hmap/tmap */
        for (ej=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++) {
          if ((vk = adjncy[ej]) >= tlstart)
            break;
          for (l=(vk&hmsizel); hmap[l]==-1 || adjncy[ejstart+hmap[l]]!=vk; l=((l+1)&hmsizel));
          hmap[l] = -1;
        }
        for (; ej<ejend; ej++)
          tmap[adjncy[ej]] = -1;
      }
      else { /* there were no collisons */
        /* collision-free fast path: a single probe suffices for every lookup */
        for (ej=txadj[vj], ejend=txadj[vj+1]; ej<ejend; ej+=2) {
          vi = tadjncy[ej];
          for (nlocal=0, ei=xadj[vi]+tadjncy[ej+1], eiend=xadj[vi+1]; ei<eiend; ei++) {
            if ((vk = adjncy[ei]) >= tlstart)
              break;
            if (hmap[vk&hmsizel]!=-1 && adjncy[ejstart+hmap[vk&hmsizel]] == vk) {
              #pragma omp atomic
              adjwgt[ei]++;
              #pragma omp atomic
              adjwgt[ejstart+hmap[vk&hmsizel]]++;
              nlocal++;
            }
          }
          for (; ei<eiend; ei++) {
            if (tmap[adjncy[ei]] != -1) {
              assert(adjncy[ejstart+tmap[adjncy[ei]]] == adjncy[ei]);
              #pragma omp atomic
              adjwgt[ei]++;
              #pragma omp atomic
              adjwgt[ejstart+tmap[adjncy[ei]]]++;
              nlocal++;
            }
          }
          if (nlocal > 0) {
            ntriangles += nlocal;
            assert(adjncy[xadj[vi]+tadjncy[ej+1]-1] == vj);
            #pragma omp atomic
            adjwgt[xadj[vi]+tadjncy[ej+1]-1] += nlocal;
          }
        }
        /* reset hmap/tmap */
        for (ej=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++) {
          if ((vk = adjncy[ej]) >= tlstart)
            break;
          hmap[vk&hmsizel] = -1;
        }
        for (; ej<ejend; ej++)
          tmap[adjncy[ej]] = -1;
      }
    }
    tmap += tlstart;   /* undo the offset before freeing */
    gk_free((void **)&hmap, &tmap, LTERM);
  }
  /* guard against a negative tlstart on small graphs */
  int32_t tlstart_idx = tlstart;
  if (tlstart < 0)
    tlstart_idx = 0;
  #pragma omp parallel default(none) shared(nvtxs, tlstart, tlstart_idx, tlsize, xadj, txadj, adjncy, tadjncy, adjwgt) private(nlocal, ej, ejend, ejstart, vi, ei, eiend) reduction(+:ntriangles) reduction(+:tnc)
  {
    int32_t *tmap1 = gk_i32smalloc(tlsize, -1, "tmap1");
    tmap1 -= tlstart; /* make indexing simpler */
    /* use tmap for the last tlsize rows */
    #pragma omp for schedule(dynamic, DYNAMIC_CHUNK)
    for (vj=tlstart_idx; vj<nvtxs; vj++) {
      if (1 || xadj[vj+1]-xadj[vj] < nvtxs-vj-1) {   /* dense-row branch disabled (see TODO) */
        /* hash Adj(vj) */
        for (ej=ejstart=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++)
          tmap1[adjncy[ej]] = ej-ejstart;
        /* find intersections */
        for (ej=txadj[vj], ejend=txadj[vj+1]; ej<ejend; ej+=2) {
          vi = tadjncy[ej];
          for (nlocal=0, ei=xadj[vi]+tadjncy[ej+1], eiend=xadj[vi+1]; ei<eiend; ei++) {
            if (tmap1[adjncy[ei]] != -1) {
              #pragma omp atomic
              adjwgt[ei]++;
              #pragma omp atomic
              adjwgt[ejstart+tmap1[adjncy[ei]]]++;
              nlocal++;
            }
          }
          if (nlocal > 0) {
            ntriangles += nlocal;
            assert(adjncy[xadj[vi]+tadjncy[ej+1]-1] == vj);
            #pragma omp atomic
            adjwgt[xadj[vi]+tadjncy[ej+1]-1] += nlocal;
          }
        }
        /* reset tmap */
        for (ej=xadj[vj], ejend=xadj[vj+1]; ej<ejend; ej++)
          tmap1[adjncy[ej]] = -1;
      }
      else { /* the row is dense */ /* TODO: This has not been updated */
        tnc++;
        /* find intersections */
        for (nlocal=0, ej=txadj[vj], ejend=txadj[vj+1]; ej<ejend; ej+=2) {
          vi = tadjncy[ej];
          nlocal += xadj[vi+1]-xadj[vi]-tadjncy[ej+1];
        }
        ntriangles += nlocal;
      }
    }
    tmap1 += tlstart;   /* undo the offset before freeing */
    gk_free((void **)&tmap1, LTERM);
  }
  gk_stopwctimer(vault->timer_2);
  return ntriangles;
}
|
clang-282491-3.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#define N 100
/* Simple length + pointer pair; used below to show that mapping the struct
   to a device copies the pointer VALUE, not the array it points to. */
typedef struct myvec{
    size_t len;     /* number of elements in data */
    double *data;   /* host-allocated payload */
} myvec_t;
/* Demonstrates the shallow-copy semantics of mapping a struct that
   contains a pointer: map(s) bitwise-copies the struct, so on the device
   s.len is valid but s.data still holds the host address (the pointed-to
   array is NOT deep-copied by mapping the struct alone). */
int main(){
    myvec_t s;
    s.data = calloc(N, sizeof *s.data);   /* no cast needed in C */
    if(!s.data){
        fprintf(stderr, "alloc failed\n");
        exit(1);
    }
    s.len = N;
    printf("CPU: Array at %p with length %zu\n", s.data, s.len);
    #pragma omp target map(s)
    printf("GPU: Array at %p with length %zu\n", s.data, s.len);
    free(s.data);   /* fix: the buffer was previously leaked */
    return 0;
}
|
omp1.c | #include <stdio.h>
#include <assert.h>
#ifdef _CIVL
#include <civlc.cvh>
#define n 10
$input double u[n];
#else
#define n 10
#endif
int nEdges = n-1;
/* Parallel residual accumulation over a 2-coloured edge list.
   Edges of one colour share no endpoints, so each colour's edges can be
   processed concurrently without write conflicts on resout. colourIA[c]
   .. colourIA[c+1] delimits the edges of colour c. */
void residualPrllel(double uin[n], double resout[n], int edges[nEdges][2], int colourIA[3]) {
  int colour;
  for (colour = 0; colour < 2; colour++) {
    #pragma omp parallel for default(none) shared(nEdges,edges,uin,resout,colourIA,colour)
    for (int e = colourIA[colour]; e < colourIA[colour+1]; e++) {
      const int va = edges[e][0];
      const int vb = edges[e][1];
      resout[va] += uin[va]*uin[vb];
      resout[vb] += 2*uin[va] + 2*uin[vb];
    }
  }
}
/* Reference serial residual accumulation: visits every edge in order and
   applies the same per-edge update as residualPrllel. */
void residualSerial(double uin[n], double resout[n], int edges[nEdges][2]) {
  for (int e = 0; e < nEdges; e++) {
    const int va = edges[e][0];
    const int vb = edges[e][1];
    resout[va] += uin[va]*uin[vb];
    resout[vb] += 2*uin[va] + 2*uin[vb];
  }
}
int main(int argc, char** argv) {
#ifndef _CIVL
  /* outside CIVL verification, use a concrete all-ones input vector */
  double u[n];
  for(int i=0; i<n; i++) {
    u[i] = 1;
  }
#endif
  /* serial edge list: the path 0-1-2-...-(n-1) in natural order */
  int edgesSerial[nEdges][2];
  for(int i=0; i<nEdges; i++) {
    edgesSerial[i][0] = i;
    edgesSerial[i][1] = i+1;
    printf("serial edge #%d=(%d, %d)\n", i, i, i+1);
  }
  /* parallel edge list: the same path, 2-coloured so that edges of one
     colour share no endpoints and can be updated concurrently */
  int edgesPrllel[nEdges][2];
  /* colour A: (0,1),(2,3),(4,5),... */
  for(int i=0; i<(nEdges+1)/2; i++) {
    edgesPrllel[i][0] = 2*i;
    edgesPrllel[i][1] = 2*i+1;
    printf("parallel edge #%d=(%d, %d) color A\n", i, 2*i, 2*i+1);
  }
  /* colour B: (1,2),(3,4),(5,6),... stored after all colour-A edges */
  for(int i=1; i<(nEdges+1)/2; i++) {
    edgesPrllel[(nEdges+1)/2+i-1][0] = 2*i-1;
    edgesPrllel[(nEdges+1)/2+i-1][1] = 2*i;
    printf("parallel edge #%d=(%d, %d) color B\n", (nEdges+1)/2+i-1, 2*i-1, 2*i);
  }
  /* colourIA[c]..colourIA[c+1] delimits the edges of colour c */
  int colourIA[3] = {0, (nEdges+1)/2, nEdges};
  printf("colour markers at %d %d %d\n", colourIA[0], colourIA[1], colourIA[2]);
  double resPrllel[n];
  double resSerial[n];
  for(int i=0; i<n; i++) {
    resSerial[i] = 0;
    resPrllel[i] = 0;
  }
  residualSerial(u, resSerial, edgesSerial);
  residualPrllel(u, resPrllel, edgesPrllel, colourIA);
  /* the two orderings must produce bitwise-identical residuals */
  for(int i=0; i<n; i++) {
    printf("residual(%d) = %e or %e\n", i, resPrllel[i], resSerial[i]);
    assert(resSerial[i] == resPrllel[i]);
  }
}
|
ten_tusscher_2004_epi_S1_17.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S1_17.h"
// Fills the requested cell-model metadata: the initial transmembrane
// voltage and/or the number of ODE state variables. Which fields are set
// is selected by the get_initial_v / get_neq flags supplied through the
// GET_CELL_MODEL_DATA macro signature (see the model header).
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initializes the NEQ-entry state vector sv of one cell with precomputed
// steady-state values (replacing the model's textbook defaults, kept below
// for reference). Ordering matches the commented list: V, gates, ionic
// concentrations.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    /*
    sv[0] = INITIAL_V; // V; millivolt
    sv[1] = 0.f; //M
    sv[2] = 0.75; //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f; //Xr1
    sv[5] = 1.f; //Xr2
    sv[6] = 0.f; //Xs
    sv[7] = 1.f; //S
    sv[8] = 0.f; //R
    sv[9] = 0.f; //D
    sv[10] = 1.f; //F
    sv[11] = 1.f; //FCa
    sv[12] = 1.f; //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f; //CaSR
    sv[15] = 11.6f; //Nai
    sv[16] = 138.3f; //Ki
    */
    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.5625425078510,0.00129164511648619,0.779570574758225,0.779427091418077,0.000174878991569467,0.485030733457084,0.00294149421393105,0.999998346195388,1.93532833226023e-08,1.89250710693833e-05,0.999770305344151,1.00711648268532,0.999995670118449,4.46785769336173e-05,0.704594271439916,9.53343199663547,139.935102489521};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}
// Advances num_steps time steps of size dt for every requested cell.
// Cells are independent (each touches a disjoint NEQ-sized slice of sv),
// so the outer loop is parallelized with OpenMP without synchronization.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;
    // NOTE(review): i is int while num_cells_to_solve is presumably
    // unsigned (uint32_t) — signed/unsigned comparison; confirm against
    // the SOLVE_MODEL_ODES_CPU macro declaration.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        // cells_to_solve, when non-NULL, selects a subset of cells by index
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// Performs one explicit step for a single cell: snapshot the current state,
// evaluate the model right-hand side, and write the updated state back
// into sv in place.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real state[NEQ];
    real next[NEQ];
    for (int k = 0; k < NEQ; k++)
        state[k] = sv[k];
    RHS_cpu(state, next, stim_current, dt);
    for (int k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={13.9775467344317,0.000166600423473182,0.000157288679125758,0.000709118450301612,0.263558270150583,0.168176898499067,0.121036017649477,3.67579958026615,0.0132247972184402,2.23991491317412,1099.99539877590,0.000482074874077319,0.582903159280657,0.0176425810465345,0.00547174746535614,2.73565215234459e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
bucle-forModificado.c | #include <stdio.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Demonstration of a parallel loop with OpenMP (SPMD).
 *
 * Usage: ./bucle <n-iterations>
 * Runs n iterations of a loop in parallel; each iteration prints the
 * id of the thread that executed it. Exits with status -1 when the
 * argument is missing or not a valid non-negative integer.
 */
int main(int argc, char **argv) {
    int i, n;

    if (argc < 2) {
        fprintf(stderr, "\n[ERROR]- Falta nº iteraciones\n");
        exit(-1);
    }

    /* strtol instead of atoi: atoi gives no way to detect non-numeric
     * or out-of-range input (it would silently yield 0 or garbage). */
    errno = 0;
    char *end;
    long val = strtol(argv[1], &end, 10);

    if (end == argv[1] || *end != '\0' || errno == ERANGE
            || val < 0 || val > INT_MAX) {

        fprintf(stderr, "\n[ERROR]- Numero de iteraciones no valido\n");
        exit(-1);
    }

    n = (int)val;

    #pragma omp parallel for
    for (i = 0; i < n; i++)
        printf("thread %d ejecuta la iteración %d del bucle\n",
               omp_get_thread_num(), i);

    return 0;
}
|
nufft.c | /* Copyright 2014-2015. The Regents of the University of California.
* Copyright 2016-2020. Uecker Lab. University Medical Center Göttingen.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2014-2017 Frank Ong
* 2014-2020 Martin Uecker
* 2018 Sebastian Rosenzweig
*
*/
#include <math.h>
#include <complex.h>
#include <assert.h>
#include <stdbool.h>
#include "misc/misc.h"
#include "misc/debug.h"
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/filter.h"
#include "num/fft.h"
#include "num/shuffle.h"
#include "num/ops.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#endif
#include "linops/linop.h"
#include "linops/someops.h"
#include "linops/fmac.h"
#include "noncart/grid.h"
#include "nufft.h"
#define FFT_FLAGS (MD_BIT(0)|MD_BIT(1)|MD_BIT(2))
// Default NUFFT configuration: Toeplitz embedding for the normal
// operator, non-periodic gridding, full-memory mode, FFT over dims 0-2.
struct nufft_conf_s nufft_conf_defaults = {
.toeplitz = true, // use Toeplitz embedding in the normal operator
.pcycle = false, // phase-cycled normal operator disabled
.periodic = false, // non-periodic gridding
.lowmem = false, // do not trade speed for reduced memory
.loopdim = -1, // no dedicated loop dimension
.flags = FFT_FLAGS, // transform dims 0, 1, 2
.cfft = 0u, // no extra centered-FFT dims
.decomp = true, // decompose 2x grid into shifted sub-grids (linear phases)
};
#include "nufft_priv.h"
DEF_TYPEID(nufft_data);
static void nufft_free_data(const linop_data_t* data);
static void nufft_apply(const linop_data_t* _data, complex float* dst, const complex float* src);
static void nufft_apply_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src);
static void nufft_apply_normal(const linop_data_t* _data, complex float* dst, const complex float* src);
static void toeplitz_mult(const struct nufft_data* data, complex float* dst, const complex float* src);
/*
 * Compute the stack of linear-phase maps used to decompose a 2x
 * oversampled grid into shifted copies of the base image grid.
 *
 * Each subset of the transformed dims (T = bitcount(flags)) yields a
 * candidate half-sample (-0.5) shift pattern; subsets that would shift
 * a singleton dimension are redundant and skipped. The number of kept
 * patterns s is returned in lph_dims[N].
 *
 * Returns a newly allocated (ND = N+1 dimensional) array of phase maps;
 * the caller takes ownership (free with md_free).
 */
static complex float* compute_linphases(int N, long lph_dims[N + 1], unsigned long flags, const long img_dims[N + 1])
{
int T = bitcount(flags);
float shifts[1 << T][T];
int s = 0;
// Enumerate all 2^T bit patterns; slot s is only advanced when the
// pattern is kept, so a skipped pattern's slot gets overwritten.
for(int i = 0; i < (1 << T); i++) {
bool skip = false;
for(int j = 0; j < T; j++) {
shifts[s][j] = 0.;
if (MD_IS_SET(i, j)) {
skip = skip || (1 == img_dims[j]);
shifts[s][j] = -0.5;
}
}
if (!skip)
s++;
}
int ND = N + 1;
md_select_dims(ND, flags, lph_dims, img_dims);
lph_dims[N] = s;
complex float* linphase = md_alloc(ND, lph_dims, CFL_SIZE);
// Each phase map is independent -> compute them in parallel.
#pragma omp parallel for shared(linphase)
for(int i = 0; i < s; i++) {
// Scatter the compact T-entry shift vector back onto the
// full ND-dim layout expected by linear_phase().
float shifts2[ND];
for (int j = 0; j < ND; j++)
shifts2[j] = 0.;
for (int j = 0, t = 0; j < N; j++)
if (MD_IS_SET(flags, j))
shifts2[j] = shifts[i][t++];
linear_phase(ND, img_dims, shifts2,
linphase + i * md_calc_size(ND, img_dims));
}
return linphase;
}
/*
 * Compute the PSF kernel in the presence of a temporal basis.
 *
 * The basis (dim 5 = TE) and weights are transposed so that the TE
 * dimension moves into the last (stack) dimension N-1, the sampling
 * weights are applied as |w|^2, and the result is contracted against
 * the (conjugated) basis to form the kernel at block position pos.
 *
 * NOTE(review): the reindexing below hard-codes dims 5 (TE) and
 * 6 (COEFF) of the project's dimension convention.
 */
static void compute_kern_basis(unsigned int N, unsigned int flags, const long pos[N],
const long krn_dims[N], complex float* krn,
const long bas_dims[N], const complex float* basis,
const long wgh_dims[N], const complex float* weights)
{
// assert(1 == krn_dims[N - 1]);
assert(1 == wgh_dims[N - 1]);
assert(1 == bas_dims[N - 1]);
// Transposed basis dims: TE moved from dim 5 into dim N-1.
long baT_dims[N];
md_copy_dims(N, baT_dims, bas_dims);
baT_dims[N - 1] = bas_dims[5];
baT_dims[5] = 1;
// Same transposition for the weights.
long wgT_dims[N];
md_copy_dims(N, wgT_dims, wgh_dims);
wgT_dims[N - 1] = wgh_dims[5];
wgT_dims[5] = 1;
long max_dims[N];
md_max_dims(N, ~0u, max_dims, baT_dims, wgT_dims);
long max_strs[N];
md_calc_strides(N, max_strs, max_dims, CFL_SIZE);
long bas_strs[N];
md_calc_strides(N, bas_strs, bas_dims, CFL_SIZE);
// Strides are remapped (not recomputed) so the transposed views
// still walk the original memory layout.
long baT_strs[N];
md_copy_strides(N, baT_strs, bas_strs);
baT_strs[N - 1] = bas_strs[5];
baT_strs[5] = 0;
long wgh_strs[N];
md_calc_strides(N, wgh_strs, wgh_dims, CFL_SIZE);
long wgT_strs[N];
md_copy_strides(N, wgT_strs, wgh_strs);
wgT_strs[N - 1] = wgh_strs[5];
wgT_strs[5] = 0;
debug_printf(DP_DEBUG1, "Allocating %ld\n", md_calc_size(N, max_dims));
complex float* tmp = md_alloc(N, max_dims, CFL_SIZE);
// tmp = basis * |weights|^2 (weights applied once plain, once conjugated)
md_copy2(N, max_dims, max_strs, tmp, baT_strs, basis, CFL_SIZE);
md_zmul2(N, max_dims, max_strs, tmp, max_strs, tmp, wgT_strs, weights);
md_zmulc2(N, max_dims, max_strs, tmp, max_strs, tmp, wgT_strs, weights);
// Move COEFF (dim 6) into dim 5 for the tensor contraction below.
baT_dims[5] = baT_dims[6];
baT_dims[6] = 1;
baT_strs[5] = baT_strs[6];
baT_strs[6] = 0;
long krn_strs[N];
md_calc_strides(N, krn_strs, krn_dims, CFL_SIZE);
long ma2_dims[N];
md_tenmul_dims(N, ma2_dims, krn_dims, max_dims, baT_dims);
long ma3_dims[N];
md_select_dims(N, flags, ma3_dims, ma2_dims);
long tmp_off = md_calc_offset(N, max_strs, pos);
long bas_off = md_calc_offset(N, baT_strs, pos);
md_zsmul(N, max_dims, tmp, tmp, (double)bas_dims[6]); // FIXME: Why?
// krn = tmp (x) conj(basis), contracted at block offset pos
md_ztenmulc2(N, ma3_dims, krn_strs, krn,
max_strs, (void*)tmp + tmp_off,
baT_strs, (void*)basis + bas_off);
md_free(tmp);
}
/*
 * Compute the k-space kernel used for PSF computation.
 *
 * With a basis the full basis-aware computation is delegated to
 * compute_kern_basis(). Otherwise the kernel is all ones, optionally
 * multiplied by |weights|^2 (weights applied once plain, once
 * conjugated). krn must hold krn_dims elements; it is overwritten.
 */
static void compute_kern(unsigned int N, unsigned int flags, const long pos[N],
const long krn_dims[N], complex float* krn,
const long bas_dims[N], const complex float* basis,
const long wgh_dims[N], const complex float* weights)
{
	if (NULL != basis) {

		compute_kern_basis(N, flags, pos, krn_dims, krn, bas_dims, basis, wgh_dims, weights);
		return;
	}

	assert(~0u == flags);

	// no basis: start from an all-ones kernel
	md_zfill(N, krn_dims, krn, 1.);

	if (NULL == weights)
		return;

	long kstrs[N];
	md_calc_strides(N, kstrs, krn_dims, CFL_SIZE);

	long wstrs[N];
	md_calc_strides(N, wstrs, wgh_dims, CFL_SIZE);

	// krn *= weights * conj(weights)
	md_zmul2(N, krn_dims, kstrs, krn, kstrs, krn, wstrs, weights);
	md_zmulc2(N, krn_dims, kstrs, krn, kstrs, krn, wstrs, weights);
}
/*
 * Compute the point-spread function of the NUFFT sampling operator.
 *
 * Extends all dimension arrays by one (internal stack dim), builds a
 * kernel of ones (weighted/basis-transformed as applicable) and applies
 * the adjoint NUFFT to it. Two strategies:
 *  - direct: one adjoint over the full k-space kernel;
 *  - lowmem: iterate over the last trajectory dim, accumulating partial
 *    adjoints, chosen when it needs less memory (B < A) and lowmem is set.
 *
 * Returns a newly allocated image-domain PSF; caller frees with md_free.
 */
complex float* compute_psf(unsigned int N, const long img_dims[N], const long trj_dims[N], const complex float* traj,
const long bas_dims[N], const complex float* basis,
const long wgh_dims[N], const complex float* weights,
bool periodic, bool lowmem)
{
// Append a singleton stack dimension to every input layout.
long img2_dims[N + 1];
md_copy_dims(N, img2_dims, img_dims);
img2_dims[N] = 1;
long trj2_dims[N + 1];
md_copy_dims(N, trj2_dims, trj_dims);
trj2_dims[N] = 1;
long bas2_dims[N + 1];
md_copy_dims(N, bas2_dims, bas_dims);
bas2_dims[N] = 1;
long wgh2_dims[N + 1];
md_copy_dims(N, wgh2_dims, wgh_dims);
wgh2_dims[N] = 1;
N++;
// k-space layout of the all-ones kernel: sample dims from trajectory.
long ksp2_dims[N];
md_copy_dims(N, ksp2_dims, img2_dims);
md_select_dims(3, ~MD_BIT(0), ksp2_dims, trj2_dims);
if (NULL != basis) {
// With a basis, move TE (dim 5) into the new stack dim N-1.
assert(1 == trj2_dims[6]);
ksp2_dims[N - 1] = trj2_dims[5];
trj2_dims[N - 1] = trj2_dims[5];
trj2_dims[5] = 1; // FIXME copy?
}
struct nufft_conf_s conf = nufft_conf_defaults;
conf.periodic = periodic;
conf.toeplitz = false; // avoid infinite loop
conf.lowmem = lowmem;
debug_printf(DP_DEBUG2, "nufft kernel dims: ");
debug_print_dims(DP_DEBUG2, N, ksp2_dims);
debug_printf(DP_DEBUG2, "nufft psf dims: ");
debug_print_dims(DP_DEBUG2, N, img2_dims);
debug_printf(DP_DEBUG2, "nufft traj dims: ");
debug_print_dims(DP_DEBUG2, N, trj2_dims);
complex float* psft = NULL;
long pos[N];
for (unsigned int i = 0; i < N; i++)
pos[i] = 0;
// A: memory for the direct path; B: per-iteration memory for the
// split path; C: output size (allocated either way).
long A = md_calc_size(N, ksp2_dims);
long B = md_calc_size(N - 1, ksp2_dims) + md_calc_size(N - 1, img2_dims);
long C = md_calc_size(N, img2_dims);
if ((A <= B) || !lowmem) {
// Direct path: single adjoint NUFFT of the full kernel.
debug_printf(DP_DEBUG1, "Allocating %ld (vs. %ld) + %ld\n", A, B, C);
complex float* ones = md_alloc(N, ksp2_dims, CFL_SIZE);
compute_kern(N, ~0u, pos, ksp2_dims, ones, bas2_dims, basis, wgh2_dims, weights);
psft = md_alloc(N, img2_dims, CFL_SIZE);
struct linop_s* op2 = nufft_create(N, ksp2_dims, img2_dims, trj2_dims, traj, NULL, conf);
linop_adjoint_unchecked(op2, psft, ones);
linop_free(op2);
md_free(ones);
} else {
// Low-memory path: loop over the last trajectory dim and
// accumulate the partial adjoints into psft.
debug_printf(DP_DEBUG1, "Allocating %ld (vs. %ld) + %ld\n", B, A, C);
psft = md_calloc(N, img2_dims, CFL_SIZE);
long trj2_strs[N];
md_calc_strides(N, trj2_strs, trj2_dims, CFL_SIZE);
complex float* ones = md_alloc(N - 1, ksp2_dims, CFL_SIZE);
complex float* tmp = md_alloc(N - 1, img2_dims, CFL_SIZE);
// Splitting over the last dim only makes sense with a basis.
assert(!((1 != trj2_dims[N - 1]) && (NULL == basis)));
for (long i = 0; i < trj2_dims[N - 1]; i++) {
debug_printf(DP_DEBUG1, "KERN %03ld\n", i);
unsigned int flags = ~0u;
if (1 != trj2_dims[N - 1])
flags = ~(1u << (N - 1u));
pos[N - 1] = i;
compute_kern(N, flags, pos, ksp2_dims, ones, bas2_dims, basis, wgh2_dims, weights);
struct linop_s* op2 = nufft_create(N - 1, ksp2_dims, img2_dims, trj2_dims, (void*)traj + i * trj2_strs[N - 1], NULL, conf);
linop_adjoint_unchecked(op2, tmp, ones);
md_zadd(N - 1, img2_dims, psft, psft, tmp);
linop_free(op2);
}
md_free(ones);
md_free(tmp);
}
return psft;
}
/*
 * Compute the PSF on a 2x oversampled grid and reformat it into the
 * decomposed (shifted sub-grid) layout used by the Toeplitz normal
 * operator.
 *
 * The trajectory is scaled by 2 to match the oversampled grid, the PSF
 * is Fourier-transformed (fftuc) and rescaled, then md_decompose splits
 * the 2x grid into factor-2 sub-grids along each transformed dim.
 * Returns a newly allocated array of psf_dims; caller frees.
 */
static complex float* compute_psf2(int N, const long psf_dims[N + 1], unsigned long flags, const long trj_dims[N + 1], const complex float* traj,
const long bas_dims[N + 1], const complex float* basis, const long wgh_dims[N + 1], const complex float* weights,
bool periodic, bool lowmem)
{
int ND = N + 1;
long img_dims[ND];
long img_strs[ND];
md_select_dims(ND, ~MD_BIT(N + 0), img_dims, psf_dims);
md_calc_strides(ND, img_strs, img_dims, CFL_SIZE);
// PSF 2x size
long img2_dims[ND];
long img2_strs[ND];
md_copy_dims(ND, img2_dims, img_dims);
for (int i = 0; i < N; i++)
if (MD_IS_SET(flags, i))
img2_dims[i] = (1 == img_dims[i]) ? 1 : (2 * img_dims[i]);
md_calc_strides(ND, img2_strs, img2_dims, CFL_SIZE);
// Scale trajectory to the oversampled grid.
complex float* traj2 = md_alloc(ND, trj_dims, CFL_SIZE);
md_zsmul(ND, trj_dims, traj2, traj, 2.);
complex float* psft = compute_psf(ND, img2_dims, trj_dims, traj2, bas_dims, basis, wgh_dims, weights, periodic, lowmem);
md_free(traj2);
fftuc(ND, img2_dims, flags, psft, psft);
// Compensate the unitary-FFT normalization: factor 4 per
// oversampled transformed dimension.
float scale = 1.;
for (int i = 0; i < N; i++)
scale *= ((img2_dims[i] > 1) && (MD_IS_SET(flags, i))) ? 4. : 1.;
md_zsmul(ND, img2_dims, psft, psft, scale);
// reformat
complex float* psf = md_alloc(ND, psf_dims, CFL_SIZE);
long factors[N];
for (int i = 0; i < N; i++)
factors[i] = ((img_dims[i] > 1) && (MD_IS_SET(flags, i))) ? 2 : 1;
md_decompose(N + 0, factors, psf_dims, psf, img2_dims, psft, CFL_SIZE);
md_free(psft);
return psf;
}
/*
 * Build the NUFFT linear operator (full-featured internal constructor).
 *
 * Sets up all dimension/stride tables (extended by one stack dimension
 * ND = N+1 for the linear phases), copies weights/basis, precomputes
 * rolloff correction, linear phases, fftmod factors, and - in Toeplitz
 * mode - the PSF. Returns a linop mapping coil images -> k-space;
 * resources are released by nufft_free_data().
 */
static struct linop_s* nufft_create3(unsigned int N,
const long ksp_dims[N],
const long cim_dims[N],
const long traj_dims[N],
const complex float* traj,
const long wgh_dims[N],
const complex float* weights,
const long bas_dims[N],
const complex float* basis,
struct nufft_conf_s conf)
{
PTR_ALLOC(struct nufft_data, data);
SET_TYPEID(nufft_data, data);
data->N = N;
data->traj = traj;
data->conf = conf;
data->flags = conf.flags;
// Kaiser-Bessel gridding kernel parameters (width 3, oversampling 2).
data->width = 3.;
data->beta = calc_beta(2., data->width);
debug_printf(DP_DEBUG1, "ksp : ");
debug_print_dims(DP_DEBUG1, N, ksp_dims);
debug_printf(DP_DEBUG1, "cim : ");
debug_print_dims(DP_DEBUG1, N, cim_dims);
debug_printf(DP_DEBUG1, "traj: ");
debug_print_dims(DP_DEBUG1, N, traj_dims);
if (NULL != weights) {
debug_printf(DP_DEBUG1, "wgh : ");
debug_print_dims(DP_DEBUG1, N, wgh_dims);
}
if (NULL != basis) {
debug_printf(DP_DEBUG1, "bas : ");
debug_print_dims(DP_DEBUG1, N, bas_dims);
}
// dim 0 must be transformed (we treat this special in the trajectory)
assert(MD_IS_SET(data->flags, 0));
// assert(md_check_compat(N, ~data->flags, ksp_dims, cim_dims));
assert(md_check_bounds(N, ~data->flags, cim_dims, ksp_dims));
assert(0 == (data->flags & conf.cfft));
// Toeplitz mode requires grid decomposition.
assert(!((!conf.decomp) && conf.toeplitz));
data->grid_conf = (struct grid_conf_s){
.width = data->width,
.os = 2.,
.periodic = data->conf.periodic,
.beta = data->beta,
};
// extend internal dimensions by one for linear phases
unsigned int ND = N + 1;
data->ksp_dims = *TYPE_ALLOC(long[ND]);
data->cim_dims = *TYPE_ALLOC(long[ND]);
data->cml_dims = *TYPE_ALLOC(long[ND]);
data->img_dims = *TYPE_ALLOC(long[ND]);
data->trj_dims = *TYPE_ALLOC(long[ND]);
data->lph_dims = *TYPE_ALLOC(long[ND]);
data->psf_dims = *TYPE_ALLOC(long[ND]);
data->wgh_dims = *TYPE_ALLOC(long[ND]);
data->bas_dims = *TYPE_ALLOC(long[ND]);
data->out_dims = *TYPE_ALLOC(long[ND]);
data->ciT_dims = *TYPE_ALLOC(long[ND]);
data->cmT_dims = *TYPE_ALLOC(long[ND]);
data->ksp_strs = *TYPE_ALLOC(long[ND]);
data->cim_strs = *TYPE_ALLOC(long[ND]);
data->cml_strs = *TYPE_ALLOC(long[ND]);
data->img_strs = *TYPE_ALLOC(long[ND]);
data->trj_strs = *TYPE_ALLOC(long[ND]);
data->lph_strs = *TYPE_ALLOC(long[ND]);
data->psf_strs = *TYPE_ALLOC(long[ND]);
data->wgh_strs = *TYPE_ALLOC(long[ND]);
data->bas_strs = *TYPE_ALLOC(long[ND]);
data->out_strs = *TYPE_ALLOC(long[ND]);
// Copy caller layouts, extended by a singleton stack dim.
md_copy_dims(N, data->cim_dims, cim_dims);
data->cim_dims[N] = 1;
md_copy_dims(N, data->ksp_dims, ksp_dims);
data->ksp_dims[N] = 1;
md_copy_dims(ND, data->ciT_dims, data->cim_dims);
md_copy_dims(ND, data->out_dims, data->ksp_dims);
md_select_dims(ND, data->flags, data->img_dims, data->cim_dims);
// Trajectory dim 0 holds one coordinate per transformed dim.
assert(bitcount(data->flags) == traj_dims[0]);
long chk_dims[N];
md_select_dims(N, ~data->flags, chk_dims, traj_dims);
assert(md_check_compat(N, ~0ul, chk_dims, ksp_dims));
// assert(md_check_bounds(N, ~0ul, chk_dims, ksp_dims));
md_copy_dims(N, data->trj_dims, traj_dims);
data->trj_dims[N] = 1;
md_calc_strides(ND, data->cim_strs, data->cim_dims, CFL_SIZE);
md_calc_strides(ND, data->img_strs, data->img_dims, CFL_SIZE);
md_calc_strides(ND, data->trj_strs, data->trj_dims, CFL_SIZE);
md_calc_strides(ND, data->ksp_strs, data->ksp_dims, CFL_SIZE);
md_calc_strides(ND, data->out_strs, data->out_dims, CFL_SIZE);
data->basis = NULL;
if (NULL != basis) {
// conf.toeplitz = false;
// Basis is nontrivial only in dims 5 (TE) and 6 (COEFF);
// the output then runs over TE instead of COEFF.
assert(!md_check_dimensions(N, bas_dims, (1 << 5) | (1 << 6)));
data->out_dims[5] = bas_dims[5]; // TE
data->out_dims[6] = 1; // COEFF
assert(data->ksp_dims[6] == bas_dims[6]);
// recompute
md_calc_strides(ND, data->out_strs, data->out_dims, CFL_SIZE);
md_copy_dims(N, data->bas_dims, bas_dims);
data->bas_dims[N] = 1;
md_calc_strides(ND, data->bas_strs, data->bas_dims, CFL_SIZE);
// Keep a private copy of the basis.
complex float* tmp = md_alloc(ND, data->bas_dims, CFL_SIZE);
md_copy(ND, data->bas_dims, tmp, basis, CFL_SIZE);
data->basis = tmp;
}
data->weights = NULL;
if (NULL != weights) {
md_copy_dims(N, data->wgh_dims, wgh_dims);
data->wgh_dims[N] = 1;
md_calc_strides(ND, data->wgh_strs, data->wgh_dims, CFL_SIZE);
// Keep a private copy of the weights.
complex float* tmp = md_alloc(ND, data->wgh_dims, CFL_SIZE);
md_copy(ND, data->wgh_dims, tmp, weights, CFL_SIZE);
data->weights = tmp;
}
// Rolloff correction for the gridding kernel apodization.
complex float* roll = md_alloc(ND, data->img_dims, CFL_SIZE);
rolloff_correction(conf.decomp ? 1. : data->grid_conf.os, data->width, data->beta, data->img_dims, roll);
data->roll = roll;
complex float* linphase = NULL;
if (conf.decomp) {
// Decomposed mode: stack of shifted linear-phase maps; fold
// the rolloff into them unless Toeplitz handles it separately.
linphase = compute_linphases(N, data->lph_dims, data->flags, data->img_dims);
md_calc_strides(ND, data->lph_strs, data->lph_dims, CFL_SIZE);
if (!conf.toeplitz)
md_zmul2(ND, data->lph_dims, data->lph_strs, linphase, data->lph_strs, linphase, data->img_strs, data->roll);
} else {
// No decomposition: the rolloff itself plays the linphase role.
linphase = roll;
data->roll = NULL;
md_copy_dims(ND, data->lph_dims, data->img_dims);
md_calc_strides(ND, data->lph_strs, data->lph_dims, CFL_SIZE);
}
fftmod(ND, data->lph_dims, data->flags, linphase, linphase);
fftscale(ND, data->lph_dims, data->flags, linphase, linphase);
// Normalization for the (possibly decomposed) oversampled grid.
float scale = 1.;
for (int i = 0; i < (int)N; i++)
if ((data->lph_dims[i] > 1) && MD_IS_SET(data->flags, i))
scale *= conf.decomp ? 0.5 : sqrtf(0.5);
md_zsmul(ND, data->lph_dims, linphase, linphase, scale);
// Per-dim factor-2 decomposition pattern for md_decompose/recompose.
data->factors = *TYPE_ALLOC(long[data->N]);
for (int i = 0; i < (int)data->N; i++)
if (data->conf.decomp && (data->img_dims[i] > 1) && MD_IS_SET(data->flags, i))
data->factors[i] = 2;
else
data->factors[i] = 1;
complex float* fftm = md_alloc(ND, data->img_dims, CFL_SIZE);
md_zfill(ND, data->img_dims, fftm, 1.);
fftmod(ND, data->img_dims, data->flags, fftm, fftm);
data->fftmod = fftm;
data->linphase = linphase;
data->psf = NULL;
#ifdef USE_CUDA
data->linphase_gpu = NULL;
data->psf_gpu = NULL;
#endif
if (conf.toeplitz) {
debug_printf(DP_DEBUG1, "NUFFT: Toeplitz mode\n");
// PSF layout: linphase dims plus the non-transformed
// trajectory dims (and TE x COEFF with a basis).
md_copy_dims(ND, data->psf_dims, data->lph_dims);
for (int i = 0; i < (int)N; i++)
if (!MD_IS_SET(data->flags, i))
data->psf_dims[i] = data->trj_dims[i];
if (NULL != basis) {
debug_printf(DP_DEBUG3, "psf_dims: ");
debug_print_dims(DP_DEBUG3, N, data->psf_dims);
data->psf_dims[6] = data->bas_dims[6];
data->psf_dims[5] = data->bas_dims[6];
}
md_calc_strides(ND, data->psf_strs, data->psf_dims, CFL_SIZE);
data->psf = compute_psf2(N, data->psf_dims, data->flags, data->trj_dims, data->traj,
data->bas_dims, data->basis, data->wgh_dims, data->weights,
true /*conf.periodic*/, conf.lowmem);
}
md_copy_dims(ND, data->cml_dims, data->cim_dims);
data->cml_dims[N + 0] = data->lph_dims[N + 0];
md_copy_dims(ND, data->cmT_dims, data->cml_dims);
if (NULL != basis) {
// Transposed views: TE <-> COEFF swapped for tensor products.
assert(1 == data->cml_dims[5]);
data->cmT_dims[5] = data->cml_dims[6];
data->cmT_dims[6] = 1;
assert(1 == data->cim_dims[5]);
data->ciT_dims[5] = data->cim_dims[6];
data->ciT_dims[6] = 1;
}
md_calc_strides(ND, data->cml_strs, data->cml_dims, CFL_SIZE);
data->cm2_dims = *TYPE_ALLOC(long[ND]);
// !
// 2x oversampled coil-image grid used for gridding.
md_copy_dims(ND, data->cm2_dims, data->cim_dims);
for (int i = 0; i < (int)N; i++)
if (conf.decomp && MD_IS_SET(data->flags, i))
data->cm2_dims[i] = (1 == cim_dims[i]) ? 1 : (2 * cim_dims[i]);
data->fft_op = linop_fft_create(ND, data->cml_dims, data->flags | data->conf.cfft);
if (conf.pcycle || conf.lowmem) {
debug_printf(DP_DEBUG1, "NUFFT: %s mode\n", conf.lowmem ? "low-mem" : "pcycle");
data->cycle = 0;
// Separate smaller FFT used by the per-phase normal operator.
data->cfft_op = linop_fft_create(N, data->cim_dims, data->flags | data->conf.cfft);
}
long out_dims[N];
md_copy_dims(N, out_dims, data->out_dims);
return linop_create(N, out_dims, N, cim_dims,
CAST_UP(PTR_PASS(data)), nufft_apply, nufft_apply_adjoint, nufft_apply_normal, NULL, nufft_free_data);
}
/*
 * Create a NUFFT operator, optionally looping over one dimension.
 *
 * If conf.loopdim selects a dimension of size > 1 (which must be
 * singleton in trajectory/weights/basis), a single-slice operator is
 * built and wrapped with linop_loop so it is applied once per slice.
 * Otherwise this forwards directly to nufft_create3().
 */
struct linop_s* nufft_create2(unsigned int N,
const long ksp_dims[N],
const long cim_dims[N],
const long traj_dims[N],
const complex float* traj,
const long wgh_dims[N],
const complex float* weights,
const long bas_dims[N],
const complex float* basis,
struct nufft_conf_s conf)
{
if (0 <= conf.loopdim) {
int d = conf.loopdim;
const long L = ksp_dims[d];
// The looped dim must be shared by k-space and images but
// trivial everywhere else.
assert(d < (int)N);
assert((NULL == weights) || (1 == wgh_dims[d]));
assert((NULL == basis) || (1 == bas_dims[d]));
assert(1 == traj_dims[d]);
assert(L == cim_dims[d]);
if (1 < L) {
debug_printf(DP_WARN, "NEW NUFFT LOOP CODE\n");
// Build the operator for a single slice along d ...
long ksp1_dims[N];
md_select_dims(N, ~MD_BIT(d), ksp1_dims, ksp_dims);
long cim1_dims[N];
md_select_dims(N, ~MD_BIT(d), cim1_dims, cim_dims);
auto nu = nufft_create2(N, ksp1_dims, cim1_dims, traj_dims, traj, wgh_dims, weights, bas_dims, basis, conf);
long out_dims[N];
md_copy_dims(N, out_dims, ksp_dims);
if (NULL != basis)
out_dims[6] = 1;
// ... give it full-array strides with stride 0 along d,
// so linop_loop can step through the slices.
long istrs[N];
long ostrs[N];
md_calc_strides(N, istrs, cim_dims, CFL_SIZE);
md_calc_strides(N, ostrs, out_dims, CFL_SIZE);
istrs[d] = 0;
ostrs[d] = 0;
auto nu1 = linop_copy_wrapper(N, istrs, ostrs, nu);
long loop_dims[N];
md_select_dims(N, MD_BIT(d), loop_dims, out_dims);
auto nu2 = linop_loop(N, loop_dims, nu1);
linop_free(nu);
linop_free(nu1);
return nu2;
}
}
return nufft_create3(N, ksp_dims, cim_dims,
traj_dims, traj, wgh_dims, weights,
bas_dims, basis, conf);
}
/*
 * Simple NUFFT constructor without a temporal basis.
 *
 * Derives the weights layout from the trajectory (same dims except the
 * coordinate dimension 0) and delegates to nufft_create2().
 */
struct linop_s* nufft_create(unsigned int N, ///< Number of dimension
			const long ksp_dims[N], ///< kspace dimension
			const long cim_dims[N], ///< Coil images dimension
			const long traj_dims[N], ///< Trajectory dimension
			const complex float* traj, ///< Trajectory
			const complex float* weights, ///< Weights, ex, soft-gating or density compensation
			struct nufft_conf_s conf) ///< NUFFT configuration options
{
	// weights follow the trajectory layout, minus coordinate dim 0
	long w_dims[N];
	md_select_dims(N, ~MD_BIT(0), w_dims, traj_dims);

	// no basis in this interface
	return nufft_create2(N, ksp_dims, cim_dims, traj_dims, traj,
			w_dims, weights, NULL, NULL, conf);
}
/*
 * Release all resources owned by a nufft_data instance.
 * Mirrors the allocations done in nufft_create3(): dimension/stride
 * tables (xfree), multi-dim arrays (md_free), and the FFT operators.
 */
static void nufft_free_data(const linop_data_t* _data)
{
auto data = CAST_DOWN(nufft_data, _data);
// dimension tables
xfree(data->ksp_dims);
xfree(data->cim_dims);
xfree(data->cml_dims);
xfree(data->img_dims);
xfree(data->trj_dims);
xfree(data->lph_dims);
xfree(data->psf_dims);
xfree(data->wgh_dims);
xfree(data->bas_dims);
xfree(data->out_dims);
xfree(data->ciT_dims);
xfree(data->cmT_dims);
// stride tables
xfree(data->ksp_strs);
xfree(data->cim_strs);
xfree(data->cml_strs);
xfree(data->img_strs);
xfree(data->trj_strs);
xfree(data->lph_strs);
xfree(data->psf_strs);
xfree(data->wgh_strs);
xfree(data->bas_strs);
xfree(data->out_strs);
xfree(data->cm2_dims);
xfree(data->factors);
// precomputed arrays (md_free is NULL-safe for unset members)
md_free(data->linphase);
md_free(data->psf);
md_free(data->fftmod);
md_free(data->weights);
md_free(data->roll);
md_free(data->basis);
#ifdef USE_CUDA
md_free(data->linphase_gpu);
md_free(data->psf_gpu);
#endif
linop_free(data->fft_op);
// cfft_op exists only in pcycle/lowmem mode (see nufft_create3)
if (data->conf.pcycle || data->conf.lowmem)
linop_free(data->cfft_op);
xfree(data);
}
// Forward: from image to kspace
static void nufft_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
auto data = CAST_DOWN(nufft_data, _data);
#ifdef USE_CUDA
assert(!cuda_ondevice(src));
#endif
assert(!data->conf.toeplitz); // if toeplitz linphase has no roll, so would need to be added
int ND = data->N + 1;
complex float* grid = md_alloc(ND, data->cml_dims, CFL_SIZE);
md_zmul2(ND, data->cml_dims, data->cml_strs, grid, data->cim_strs, src, data->lph_strs, data->linphase);
linop_forward(data->fft_op, ND, data->cml_dims, grid, ND, data->cml_dims, grid);
md_zmul2(ND, data->cml_dims, data->cml_strs, grid, data->cml_strs, grid, data->img_strs, data->fftmod);
complex float* gridX = md_alloc(data->N, data->cm2_dims, CFL_SIZE);
md_recompose(data->N, data->factors, data->cm2_dims, gridX, data->cml_dims, grid, CFL_SIZE);
md_free(grid);
complex float* tmp = dst;
if (NULL != data->basis)
tmp = md_alloc(ND, data->ksp_dims, CFL_SIZE);
md_clear(ND, data->ksp_dims, tmp, CFL_SIZE);
grid2H(&data->grid_conf, ND, data->trj_dims, data->traj, data->ksp_dims, tmp, data->cm2_dims, gridX);
md_free(gridX);
if (NULL != data->basis) {
md_ztenmul(data->N, data->out_dims, dst, data->ksp_dims, tmp, data->bas_dims, data->basis);
md_free(tmp);
}
if (NULL != data->weights)
md_zmul2(data->N, data->out_dims, data->out_strs, dst, data->out_strs, dst, data->wgh_strs, data->weights);
}
/*
 * Low-memory gridding step of the adjoint NUFFT.
 *
 * Instead of allocating the full 2x oversampled grid, iterate over all
 * dimensions not covered by the trajectory, grid each k-space slice
 * into a reduced-size grid, decompose it, and copy the result into the
 * corresponding block of `grid`. Dimensions present in k-space but not
 * in the grid are summed inside the inner loop.
 */
static void split_nufft_adjoint (const struct nufft_data* data, int ND, complex float* grid, const complex float* src)
{
debug_printf(DP_DEBUG1, "nufft_adj split calculation for lowmem\n");
// FFT_FLAGS, because the image dimensions can always occur in the trajectory
long nontriv_traj_flags = FFT_FLAGS | md_nontriv_dims(data->N, data->trj_dims);
long cm2_reduced_dims[ND];
md_select_dims(ND, nontriv_traj_flags, cm2_reduced_dims, data->cm2_dims);
// everything not in traj dims is done separately
long max_dims[ND];
md_set_dims(ND, max_dims, 1);
md_max_dims(ND, ~nontriv_traj_flags, max_dims, data->cm2_dims, data->ksp_dims);
long iter_dims[data->N];
// All dimension not in the nontriv_traj_flags and all dimensions in ksp dims but not in cm2 dims
// We need to exclude these last dimensions, because we have to sum over sum in the gridding procedure
long iter_flags = ~( nontriv_traj_flags
| ( md_nontriv_dims(ND, data->ksp_dims)
& ~md_nontriv_dims(ND, data->cm2_dims)));
md_select_dims(data->N, iter_flags, iter_dims, max_dims);
long ksp_reduced_dims[ND];
md_select_dims(ND, nontriv_traj_flags, ksp_reduced_dims, data->ksp_dims);
long ksp_reduced_strs[ND];
md_calc_strides(ND, ksp_reduced_strs, ksp_reduced_dims, CFL_SIZE);
long ksp_strs[ND];
md_calc_strides(ND, ksp_strs, data->ksp_dims, CFL_SIZE);
long cml_reduced_dims[ND];
cml_reduced_dims[data->N] = data->cml_dims[data->N];
md_select_dims(data->N, nontriv_traj_flags, cml_reduced_dims, data->cml_dims);
long cml_reduced_strs[ND];
md_calc_strides(ND, cml_reduced_strs, cml_reduced_dims, CFL_SIZE);
// Work buffers sized for a single reduced block.
complex float* grid_reduced = md_alloc(ND, cml_reduced_dims, CFL_SIZE);
complex float* gridX = md_alloc(ND, cm2_reduced_dims, CFL_SIZE);
complex float* src_reduced = md_alloc(ND, ksp_reduced_dims, CFL_SIZE);
long pos[ND];
md_set_dims(ND, pos, 0L);
// Outer loop: independent blocks; inner loop: k-space-only dims
// that must be accumulated (gridded onto the same gridX).
do {
// sum over additional dimensions in the k-space
long sum_dims[ND];
long sum_flags = ~(nontriv_traj_flags | iter_flags);
md_select_dims(ND, sum_flags, sum_dims, max_dims);
md_clear(ND, cm2_reduced_dims, gridX, CFL_SIZE);
do {
md_copy_block2(data->N, pos, ksp_reduced_dims, ksp_reduced_strs, src_reduced, data->ksp_dims, ksp_strs, src, CFL_SIZE);
grid2(&data->grid_conf, ND, data->trj_dims, data->traj, cm2_reduced_dims, gridX, ksp_reduced_dims, src_reduced);
} while(md_next(ND, sum_dims, sum_flags, pos));
md_decompose(data->N, data->factors, cml_reduced_dims, grid_reduced, cm2_reduced_dims, gridX, CFL_SIZE);
md_copy_block2(ND, pos, data->cml_dims, data->cml_strs, grid, cml_reduced_dims, cml_reduced_strs, grid_reduced, CFL_SIZE);
} while(md_next(data->N, iter_dims, ~0L, pos));
md_zmulc2(ND, data->cml_dims, data->cml_strs, grid, data->cml_strs, grid, data->img_strs, data->fftmod);
md_free(grid_reduced);
md_free(gridX);
md_free(src_reduced);
}
// Adjoint: from kspace to image
// Adjoint: from kspace to image
// Pipeline: apply conj(weights)/basis, grid the samples onto the 2x
// oversampled grid (directly or via the lowmem split path), decompose
// into sub-grids, inverse FFT, and accumulate against conj(linphase).
static void nufft_apply_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
auto data = CAST_DOWN(nufft_data, _data);
#ifdef USE_CUDA
assert(!cuda_ondevice(src));
#endif
int ND = data->N + 1;
complex float* wdat = NULL;
if (NULL != data->weights) {
// Adjoint of the weighting: multiply by conj(weights).
wdat = md_alloc(data->N, data->out_dims, CFL_SIZE);
md_zmulc2(data->N, data->out_dims, data->out_strs, wdat, data->out_strs, src, data->wgh_strs, data->weights);
src = wdat;
}
complex float* bdat = NULL;
if (NULL != data->basis) {
// Adjoint of the basis contraction: expand TE back to COEFF.
bdat = md_alloc(data->N, data->ksp_dims, CFL_SIZE);
md_ztenmulc(data->N, data->ksp_dims, bdat, data->out_dims, src, data->bas_dims, data->basis);
src = bdat;
}
complex float* grid = md_alloc(ND, data->cml_dims, CFL_SIZE);
complex float* gridX = NULL;
if (data->conf.lowmem) {
// Split path avoids allocating the full oversampled grid.
split_nufft_adjoint(data, ND, grid, src);
} else {
gridX = md_calloc(data->N, data->cm2_dims, CFL_SIZE);
grid2(&data->grid_conf, ND, data->trj_dims, data->traj, data->cm2_dims, gridX, data->ksp_dims, src);
}
md_free(bdat);
md_free(wdat);
if (!data->conf.lowmem) {
// Split the oversampled grid into shifted sub-grids.
md_decompose(data->N, data->factors, data->cml_dims, grid, data->cm2_dims, gridX, CFL_SIZE);
md_free(gridX);
md_zmulc2(ND, data->cml_dims, data->cml_strs, grid, data->cml_strs, grid, data->img_strs, data->fftmod);
}
linop_adjoint(data->fft_op, ND, data->cml_dims, grid, ND, data->cml_dims, grid);
// Sum the sub-grids weighted by conj(linphase) into the image.
md_clear(ND, data->cim_dims, dst, CFL_SIZE);
md_zfmacc2(ND, data->cml_dims, data->cim_strs, dst, data->cml_strs, grid, data->lph_strs, data->linphase);
md_free(grid);
// In Toeplitz mode the rolloff was kept separate from the linphases.
if (data->conf.toeplitz)
md_zmul2(ND, data->cim_dims, data->cim_strs, dst, data->cim_strs, dst, data->img_strs, data->roll);
}
#ifdef USE_CUDA
/*
 * Lazily mirror the precomputed linphase/psf arrays on the GPU.
 * The const is cast away to cache the GPU copies in the (logically
 * mutable) data struct; the arrays themselves are only read afterwards.
 */
static void gpu_alloc(const struct nufft_data* data)
{
unsigned int ND = data->N + 1;
if (NULL == data->linphase_gpu)
((struct nufft_data*)data)->linphase_gpu = md_gpu_move(ND, data->lph_dims, data->linphase, CFL_SIZE);
if (NULL == data->psf_gpu)
((struct nufft_data*)data)->psf_gpu = md_gpu_move(ND, data->psf_dims, data->psf, CFL_SIZE);
}
#endif
/*
 * Toeplitz normal operator (A^H A) without re-gridding:
 * multiply by linphase, FFT, pointwise/tensor multiply by the
 * precomputed PSF, inverse FFT, accumulate against conj(linphase).
 */
static void toeplitz_mult(const struct nufft_data* data, complex float* dst, const complex float* src)
{
unsigned int ND = data->N + 1;
const complex float* linphase = data->linphase;
const complex float* psf = data->psf;
#ifdef USE_CUDA
if (cuda_ondevice(src)) {
// Use the lazily-created GPU copies when src lives on the GPU.
gpu_alloc(data);
linphase = data->linphase_gpu;
psf = data->psf_gpu;
}
#endif
complex float* grid = md_alloc_sameplace(ND, data->cml_dims, CFL_SIZE, dst);
md_zmul2(ND, data->cml_dims, data->cml_strs, grid, data->cim_strs, src, data->lph_strs, linphase);
linop_forward(data->fft_op, ND, data->cml_dims, grid, ND, data->cml_dims, grid);
// Tensor product with the PSF (contracts COEFF with a basis).
complex float* gridT = md_alloc_sameplace(ND, data->cmT_dims, CFL_SIZE, dst);
md_ztenmul(ND, data->cmT_dims, gridT, data->cml_dims, grid, data->psf_dims, psf);
md_free(grid);
linop_adjoint(data->fft_op, ND, data->cml_dims, gridT, ND, data->cml_dims, gridT);
md_clear(ND, data->cim_dims, dst, CFL_SIZE);
md_zfmacc2(ND, data->cml_dims, data->cim_strs, dst, data->cml_strs, gridT, data->lph_strs, linphase);
md_free(gridT);
}
/* Toeplitz normal-operator multiplication for a single linear-phase
 * cycle i, working on image-sized (not oversampled) arrays to save
 * memory.  Accumulates into dst without clearing it -- the caller is
 * responsible for clearing dst once before the first cycle (see
 * toeplitz_mult_pcycle / nufft_apply_normal). */
static void toeplitz_mult_lowmem(const struct nufft_data* data, int i, complex float* dst, const complex float* src)
{
	const complex float* linphase = data->linphase;
	const complex float* psf = data->psf;

#ifdef USE_CUDA
	if (cuda_ondevice(src)) {

		gpu_alloc(data);

		linphase = data->linphase_gpu;
		psf = data->psf_gpu;
	}
#endif
	/* select the i-th slice of the stacked linphase / PSF arrays */
	const complex float* clinphase = linphase + i * md_calc_size(data->N, data->lph_dims);
	const complex float* cpsf = psf + i * md_calc_size(data->N, data->psf_dims);

	complex float* grid = md_alloc_sameplace(data->N, data->cim_dims, CFL_SIZE, dst);

	/* modulate by the cycle's linear phase and FFT */
	md_zmul2(data->N, data->cim_dims, data->cim_strs, grid, data->cim_strs, src, data->img_strs, clinphase);
	linop_forward(data->cfft_op, data->N, data->cim_dims, grid, data->N, data->cim_dims, grid);

	complex float* gridT = md_alloc_sameplace(data->N, data->ciT_dims, CFL_SIZE, dst);

	/* pointwise multiplication with the cycle's point-spread function */
	md_ztenmul(data->N, data->ciT_dims, gridT, data->cim_dims, grid, data->psf_dims, cpsf);

	md_free(grid);

	linop_adjoint(data->cfft_op, data->N, data->cim_dims, gridT, data->N, data->cim_dims, gridT);

	/* demodulate and accumulate (no md_clear here by design) */
	md_zfmacc2(data->N, data->cim_dims, data->cim_strs, dst, data->cim_strs, gridT, data->img_strs, clinphase);

	md_free(gridT);
}
/* Phase-cycled Toeplitz multiplication: applies exactly one cycle per
 * call, rotating through the stacked linear phases. */
static void toeplitz_mult_pcycle(const struct nufft_data* data, complex float* dst, const complex float* src)
{
	unsigned int ncycles = data->lph_dims[data->N];

	/* advance the cycle counter -- mutable state behind const (FIXME:) */
	struct nufft_data* mdata = (struct nufft_data*)data;
	mdata->cycle = (data->cycle + 1) % ncycles;

	assert(dst != src);

	md_clear(data->N, data->cim_dims, dst, CFL_SIZE);
	toeplitz_mult_lowmem(data, data->cycle, dst, src);
}
/* Normal operator (A^H A) of the NUFFT.
 * With the Toeplitz option the PSF convolution replaces the explicit
 * forward/adjoint pair; otherwise we apply forward then adjoint through
 * a temporary k-space buffer. */
static void nufft_apply_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	auto data = CAST_DOWN(nufft_data, _data);

	if (data->conf.toeplitz) {

		if (data->conf.pcycle) {

			/* phase cycling: one cycle per call */
			toeplitz_mult_pcycle(data, dst, src);

		} else if (data->conf.lowmem) {

			/* low-memory: accumulate each linear-phase cycle in turn
			 * (toeplitz_mult_lowmem only accumulates, so clear once here) */
			int ncycles = data->lph_dims[data->N];

			assert(dst != src);

			md_clear(data->N, data->cim_dims, dst, CFL_SIZE);

			for (int i = 0; i < ncycles; i++)
				toeplitz_mult_lowmem(data, i, dst, src);

		} else {

			toeplitz_mult(data, dst, src);
		}

	} else {

		/* no Toeplitz embedding: explicit A^H (A src) round trip */
		complex float* tmp_ksp = md_alloc(data->N + 1, data->out_dims, CFL_SIZE);

		nufft_apply(_data, tmp_ksp, src);
		nufft_apply_adjoint(_data, dst, tmp_ksp);

		md_free(tmp_ksp);
	}
}
/**
 * Estimate image dimensions from trajectory
 *
 * For each flagged dimension, take twice the largest absolute trajectory
 * coordinate (rounded up) as the image size; unflagged dimensions are 1.
 */
void estimate_im_dims(int N, unsigned long flags, long dims[N], const long tdims[N], const complex float* traj)
{
	int T = tdims[0];

	assert(T == (int)bitcount(flags));

	/* largest absolute coordinate seen along each trajectory axis */
	float max_dims[T];

	for (int a = 0; a < T; a++)
		max_dims[a] = 0.;

	for (long s = 0; s < md_calc_size(N - 1, tdims + 1); s++)
		for (int a = 0; a < tdims[0]; a++)
			max_dims[a] = MAX(cabsf(traj[a + tdims[0] * s]), max_dims[a]);

	/* scatter the estimated sizes into the flagged positions */
	int t = 0;

	for (int j = 0; j < N; j++) {

		dims[j] = 1;

		if (MD_IS_SET(flags, j)) {

			dims[t] = (0. == max_dims[t]) ? 1 : (2 * ceilf(max_dims[t]));
			t++;
		}
	}
}
/**
 * Estimate fast square image dimensions from trajectory
 *
 * Takes the largest absolute coordinate over the first three trajectory
 * components, doubles it, and rounds up to the next 7-smooth number
 * (prime factors 2, 3, 5, 7 only) so the subsequent FFT is fast.
 */
void estimate_fast_sq_im_dims(unsigned int N, long dims[3], const long tdims[N], const complex float* traj)
{
	float max_dims[3] = { 0., 0., 0. };

	for (long i = 0; i < md_calc_size(N - 1, tdims + 1); i++)
		for (int j = 0; j < 3; j++)
			max_dims[j] = MAX(cabsf(traj[j + tdims[0] * i]), max_dims[j]);

	// 2* is needed since we take the absolute value of the trajectory above, and it is scaled from
	// -DIM/2 to DIM/2
	long max_square = 2 * MAX(MAX(max_dims[0], max_dims[1]), max_dims[2]);

	// compute next fast size for the Fourier transform, i.e. the next number
	// composed only of small prime factors (2, 3, 5, 7); the search is
	// bounded by 4 * max_square to avoid an unbounded loop.
	// BUG FIX: start from 1 when max_square is 0 (all-zero trajectory);
	// starting from 0 spun forever in the factor-stripping loop below
	// since 0 % 2 == 0 and 0 / 2 == 0.
	long fast_size = (0 == max_square) ? 1 : max_square;

	for ( ; fast_size <= 4 * max_square; ++fast_size) {

		long n = fast_size;

		while (0 == n % 2l) { n /= 2l; }
		while (0 == n % 3l) { n /= 3l; }
		while (0 == n % 5l) { n /= 5l; }
		while (0 == n % 7l) { n /= 7l; }

		if (n <= 1)
			break;
	}

	for (int j = 0; j < 3; j++)
		dims[j] = (0. == max_dims[j]) ? 1 : fast_size;
}
|
omp_task_final.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
/* Verify the OpenMP 'final' clause: tasks created with index >= 10 are
 * final, so the task nested inside them must be an included (undeferred)
 * task executed by the same thread.  Returns 1 on success, 0 on failure.
 * Assumes NUM_TASKS > 10 (indices 10..NUM_TASKS-1 are the final tasks). */
int test_omp_task_final()
{
	int tids[NUM_TASKS];
	int includedtids[NUM_TASKS];
	int i;
	int error = 0;

	#pragma omp parallel
	{
		#pragma omp single
		{
			for (i = 0; i < NUM_TASKS; i++) {
				/* First we have to store the value of the loop index in a new
				 * variable which will be private for each task, because
				 * otherwise it would be overwritten if the execution of the
				 * task takes longer than the time needed to enter the next
				 * step of the loop!
				 */
				int myi;
				myi = i;

				/* The final clause is evaluated by the generating thread at
				 * task-creation time, where i == myi, so reading the shared
				 * loop variable here is safe. */
				#pragma omp task final(i>=10)
				{
					tids[myi] = omp_get_thread_num();

					/* we generate included tasks for final tasks */
					if(myi >= 10) {
						int included = myi;

						#pragma omp task
						{
							my_sleep (SLEEPTIME);
							includedtids[included] = omp_get_thread_num();
						} /* end of omp included task of the final task */

						my_sleep (SLEEPTIME);
					} /* end of if it is a final task */
				} /* end of omp task */
			} /* end of for */
		} /* end of single */
	} /* end of parallel */

	/* Now we check that each final task and its included task were executed
	 * by the same thread. */
	for (i = 10; i < NUM_TASKS; i++) {
		if (tids[i] != includedtids[i]) {
			error++;
		}
	}
	return (error==0);
} /* end of test_omp_task_final */
int main()
{
	/* Run the check REPETITIONS times; the exit status is the number
	 * of failed repetitions (0 == success). */
	int num_failed = 0;

	for (int i = 0; i < REPETITIONS; i++)
		if (!test_omp_task_final())
			num_failed++;

	return num_failed;
}
|
gemm.c | #include <stdlib.h>
#include <math.h>
#include "standard.h"
/* Binary GEMM: A holds 0/1 "sign" entries.  For each nonzero A[i][k] the
 * k-th row of B is added to the i-th row of C, otherwise it is subtracted.
 * ALPHA is accepted for signature parity with gemm() but is unused here. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    for (int i = 0; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            /* +1 for a set bit, -1 for a clear one; IEEE guarantees
             * c + (-b) == c - b, so this matches the branchy form exactly */
            float sign = A[i*lda + k] ? 1.0f : -1.0f;
            for (int j = 0; j < N; ++j)
                C[i*ldc + j] += sign * B[k*ldb + j];
        }
    }
}
/* Allocate a rows*cols matrix filled with uniform random values in [0,1].
 * The caller owns the returned buffer (free() it).
 * BUG FIX: the calloc() result was used unchecked, which is undefined
 * behavior on allocation failure; we now return NULL in that case.
 * The element count is computed in size_t to avoid int overflow. */
float *random_matrix(int rows, int cols)
{
    size_t count = (size_t)rows * (size_t)cols;
    float *m = calloc(count, sizeof *m);
    if (!m)
        return NULL;
    for (size_t i = 0; i < count; ++i) {
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}
/* Benchmark helper: run gemm_cpu() ten times on freshly generated random
 * operands, honoring the transpose flags when shaping A and B.
 * (The timing instrumentation is currently commented out upstream.) */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a = TA ? random_matrix(k, m) : random_matrix(m, k);
    int lda = TA ? m : k;
    float *b = TB ? random_matrix(n, k) : random_matrix(k, n);
    int ldb = TB ? k : n;
    float *c = random_matrix(m, n);

    for (int rep = 0; rep < 10; ++rep)
        gemm_cpu(TA, TB, 0, m, n, k, 1, a, lda, b, ldb, 1, c, n);

    free(a);
    free(b);
    free(c);
}
/*
 * Dispatch a GEMM across g_num_threads pre-started worker threads plus the
 * calling thread.  Workers are parked on per-thread spin locks and
 * (presumably) read their parameters from the global g_gemm_args_pointer
 * array once released; completion is signalled through g_finished[].
 *
 * NOTE(review): the TA && TB case sets up no partitioning, so workers would
 * see stale i_start/M/K for that layout -- presumably unused in this build;
 * confirm.
 * NOTE(review): integer division means the per-thread chunks may leave the
 * last M % (g_num_threads + 1) rows (resp. K elements) uncovered -- verify
 * against the callers' sizes.
 */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    int i;
    /* populate each worker's argument block, then release it by
     * unlocking its spin lock */
    for (i = 0; i < g_num_threads; i++) {
        if ((!TA && TB) || (!TA && !TB)) {
            /* A not transposed: partition the M (output-row) axis into
             * g_num_threads + 1 equal chunks; the caller takes chunk 0 */
            g_gemm_args_pointer[i].i_start = (i+1) * M / (g_num_threads + 1);
            g_gemm_args_pointer[i].M = g_gemm_args_pointer[i].i_start + M / (g_num_threads + 1);
            g_gemm_args_pointer[i].K = K;
        }
        if (TA && !TB) {
            /* A transposed: partition the K axis instead (gemm_tn loops
             * k starting at i_start) */
            g_gemm_args_pointer[i].i_start = (i+1) * K / (g_num_threads + 1);
            g_gemm_args_pointer[i].K = g_gemm_args_pointer[i].i_start + K / (g_num_threads + 1);
            g_gemm_args_pointer[i].M = M;
        }
        g_gemm_args_pointer[i].TA = TA;
        g_gemm_args_pointer[i].TB = TB;
        g_gemm_args_pointer[i].N = N;
        g_gemm_args_pointer[i].ALPHA = ALPHA;
        g_gemm_args_pointer[i].A = A;
        g_gemm_args_pointer[i].lda = lda;
        g_gemm_args_pointer[i].B = B;
        g_gemm_args_pointer[i].BETA = BETA;
        g_gemm_args_pointer[i].ldb = ldb;
        g_gemm_args_pointer[i].C = C;
        g_gemm_args_pointer[i].ldc = ldc;
        sgx_spin_unlock(&g_spin_locks[i]);
    }
    // ocall_start_measuring_training(9, 10);
    /* the calling thread computes the first chunk itself */
    if ((!TA && TB) || (!TA && !TB)) {
        gemm_cpu(TA, TB, 0, M / (g_num_threads + 1), N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
    }
    if (TA && !TB) {
        gemm_cpu(TA, TB, 0, M, N, K / (g_num_threads + 1), ALPHA, A, lda, B, ldb, BETA, C, ldc);
    }
    // ocall_end_measuring_training(9, 10);
    /* busy-wait until every worker has flagged completion */
    int waiting = 1;
    while (waiting) {
        waiting = 0;
        for (i = 0; i < g_num_threads; i++) {
            if (g_finished[i] == 0)
                waiting = 1;
        }
    }
    /* re-arm the workers: re-acquire their locks, then clear the flags */
    for (i = 0; i < g_num_threads; i++)
        sgx_spin_lock(&g_spin_locks[i]);
    for (i = 0; i < g_num_threads; i++)
        g_finished[i] = 0;
    // gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
/* C += ALPHA * A * B (no transposes) for the output rows [i_start, M).
 * i_start lets several threads share the row range of one multiply. */
void gemm_nn(int i_start, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    for (int i = i_start; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            const float scaled_a = ALPHA * A[i*lda + k];
            for (int j = 0; j < N; ++j)
                C[i*ldc + j] += scaled_a * B[k*ldb + j];
        }
    }
}
/* C += ALPHA * A * B^T for the output rows [i_start, M).
 * B is accessed row-wise as B[j][k], i.e. already transposed in memory. */
void gemm_nt(int i_start, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    for (int i = i_start; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            float acc = 0;
            for (int k = 0; k < K; ++k)
                acc += ALPHA * A[i*lda + k] * B[j*ldb + k];
            C[i*ldc + j] += acc;
        }
    }
}
/* C += ALPHA * A^T * B.  Note: unlike the other kernels, i_start here
 * partitions the K (reduction) axis, matching the dispatch in gemm(). */
void gemm_tn(int i_start, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    for (int i = 0; i < M; ++i) {
        for (int k = i_start; k < K; ++k) {
            const float scaled_a = ALPHA * A[k*lda + i];
            for (int j = 0; j < N; ++j)
                C[i*ldc + j] += scaled_a * B[k*ldb + j];
        }
    }
}
/* C += ALPHA * A^T * B^T for the output rows [i_start, M). */
void gemm_tt(int i_start, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    for (int i = i_start; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            float acc = 0;
            for (int k = 0; k < K; ++k)
                acc += ALPHA * A[i + k*lda] * B[k + j*ldb];
            C[i*ldc + j] += acc;
        }
    }
}
/* CPU GEMM entry point: C = ALPHA * op(A) * op(B) + BETA * C for the
 * rows [i_start, M), where op() transposes when TA/TB are set.
 * The BETA scaling is applied here so the kernels only accumulate. */
void gemm_cpu(int TA, int TB, int i_start, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    for (int i = i_start; i < M; ++i)
        for (int j = 0; j < N; ++j)
            C[i*ldc + j] *= BETA;

    /* dispatch on the transpose flags */
    if (!TA && !TB)
        gemm_nn(i_start, M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else if (TA && !TB)
        gemm_tn(i_start, M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else if (!TA && TB)
        gemm_nt(i_start, M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else
        gemm_tt(i_start, M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
}
#ifdef GPU
#include <math.h>
/*
 * GEMM on the GPU via cuBLAS.  cuBLAS is column-major, so the call swaps
 * the operand order and dimensions (B before A, N before M) to compute
 * row-major C in place.
 * NOTE(review): cublasSgemm() returns cublasStatus_t, not cudaError_t;
 * storing it in a cudaError_t compiles (both are enums) but mixes the two
 * error domains -- confirm what check_error() expects.
 */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A_gpu, int lda,
        float *B_gpu, int ldb,
        float BETA,
        float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    check_error(status);
}
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* Benchmark helper: run gemm_gpu() 32 times on freshly generated random
 * operands, honoring the transpose flags when shaping A and B.
 * (The timing instrumentation is currently commented out upstream.) */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a = TA ? random_matrix(k, m) : random_matrix(m, k);
    int lda = TA ? m : k;
    float *b = TB ? random_matrix(n, k) : random_matrix(k, n);
    int ldb = TB ? k : n;
    float *c = random_matrix(m, n);

    for (int rep = 0; rep < 32; ++rep)
        gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);

    free(a);
    free(b);
    free(c);
}
/* Benchmark helper: upload random operands, run gemm_gpu() 'iter' times
 * with a device sync after each call, and free everything.
 * The GFLOP computation feeds the (commented-out) report below. */
void time_gpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);
    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);
    int i;
    //clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        cudaThreadSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    (void)gflop;  /* only consumed by the commented-out report below */
    /* BUG FIX: removed 'end = clock();' -- 'end' is only declared in the
     * commented-out line above, so the assignment did not compile. */
    //end = clock();
    //double seconds = sec(end-start);
    //printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}
/* Compare gemm_gpu() against gemm_cpu() on identical random inputs and
 * accumulate the squared elementwise difference (mean reported by the
 * commented-out printf). */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //pm(m, n, c_gpu);
    /* BUG FIX: gemm_cpu() takes i_start as its third parameter; the call
     * previously omitted it, shifting every following argument by one. */
    gemm_cpu(TA,TB,0,m,n,k,1,a,lda,b,ldb,1,c,n);
    //pm(m, n, c);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        sse += pow(c[i]-c_gpu[i], 2);
    }
    //printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}
/* Run a fixed set of GPU GEMM timing configurations.  The commented list
 * preserves previously used configurations for reference. */
int test_gpu_blas()
{
    /*
    test_gpu_accuracy(0,0,10,576,75);

    test_gpu_accuracy(0,0,17,10,10);
    test_gpu_accuracy(1,0,17,10,10);
    test_gpu_accuracy(0,1,17,10,10);
    test_gpu_accuracy(1,1,17,10,10);

    test_gpu_accuracy(0,0,1000,10,100);
    test_gpu_accuracy(1,0,1000,10,100);
    test_gpu_accuracy(0,1,1000,10,100);
    test_gpu_accuracy(1,1,1000,10,100);

    test_gpu_accuracy(0,0,10,10,10);

    time_gpu(0,0,64,2916,363);
    time_gpu(0,0,64,2916,363);
    time_gpu(0,0,64,2916,363);
    time_gpu(0,0,192,729,1600);
    time_gpu(0,0,384,196,1728);
    time_gpu(0,0,256,196,3456);
    time_gpu(0,0,256,196,2304);
    time_gpu(0,0,128,4096,12544);
    time_gpu(0,0,128,4096,4096);
    */

    /* the first configuration is repeated three times (warm-up + samples) */
    for (int rep = 0; rep < 3; ++rep)
        time_gpu(0,0,64,75,12544);

    time_gpu(0,0,64,576,12544);
    time_gpu(0,0,256,2304,784);
    time_gpu(1,1,2304,256,784);
    time_gpu(0,0,512,4608,196);
    time_gpu(1,1,4608,512,196);

    return 0;
}
#endif
|
fibonacci.c | #include <stdio.h>
#include <omp.h>
#define NUM 10
/* Naive task-parallel Fibonacci.  Each call prints the id of the thread
 * executing it, then spawns two child tasks for the subproblems.
 * x and y must be 'shared' so the child tasks write the parent's copies;
 * the taskwait makes those writes visible before the sum is formed.
 * Intended to be called from inside a parallel region so the generated
 * tasks can be picked up by the team. */
int fibonacci(int n){
	int x, y;

	printf("%d\n", omp_get_thread_num());

	/* base case: fib(0) = 0, fib(1) = 1 */
	if (n<2)
		return n;

	#pragma omp task shared(x)
	x = fibonacci(n-1);

	#pragma omp task shared(y)
	y = fibonacci(n-2);

	/* wait for both children before combining their results */
	#pragma omp taskwait
	return x + y;
}
int main(){
	int res;

	/* BUG FIX: previously every thread of the parallel region called
	 * fibonacci() and assigned the shared variable 'res' concurrently,
	 * which is a data race (and redundant work).  A single construct
	 * lets one thread build the root of the task tree while the whole
	 * team executes the generated tasks. */
	#pragma omp parallel
	{
		#pragma omp single
		res = fibonacci(5);
	}

	printf("Fibonacci = %d\n", res);
	return 0;
}
|
Searching.202007271527.gather_top_m.subsearch.h | //
// Created by Zhen Peng on 7/27/2020.
//
#ifndef BATCH_SEARCHING_SEARCHING_H
#define BATCH_SEARCHING_SEARCHING_H
#include <vector>
#include <boost/dynamic_bitset.hpp>
//#include <boost/sort/sort.hpp>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <immintrin.h>
#include <cstring>
#include <unordered_set>
#include <set>
#include <cfloat>
#include <algorithm>
//#include <omp.h>
#include "../include/definitions.h"
//#include "../include/efanna2e/neighbor.h"
#include "../include/utils.h"
#include "../include/Candidate.h"
#include "../include/parallelization.h"
#include "../include/bitvector.h"
namespace PANNS {
class Searching {
//private:
public:
idi num_v_ = 0;
edgei num_e_ = 0;
idi num_queries_ = 0;
uint64_t dimension_ = 0;
idi width_ = 0; // NSG largest degree
idi ep_ = 0; // Start point
// std::vector<dataf> data_load_;
// std::vector<dataf> queries_load_;
// std::vector< std::vector<dataf> > data_load_;
// std::vector< std::vector<dataf> > queries_load_;
// std::vector<distf> norms_;
dataf *data_load_ = nullptr;
dataf *queries_load_ = nullptr;
// dataf *norms_;
// std::vector< std::vector<idi> > nsg_graph_;
// idi *nsg_graph_indices_;
// idi *nsg_graph_out_edges_;
// std::vector< std::vector<idi> > edge_list_;
char *opt_nsg_graph_ = nullptr;
uint64_t data_bytes_;
uint64_t neighbor_bytes_;
uint64_t vertex_bytes_;
// For multithreads
int num_threads_ = 1;
// int num_real_threads_ = 1;
// int num_threads_intra_query_ = 1;
// int num_threads_inter_query_ = 1;
dataf compute_norm(
const dataf *data) const;
// idi vertex_id);
// const std::vector<PANNS::dataf> &data);
// size_t loc_start,
// idi dimension)
dataf compute_distance_with_norm(
const dataf *v_data,
const dataf *q_data,
// idi vertex_id,
// idi query_id,
// const std::vector<dataf> &d_data,
// const std::vector<dataf> &q_data,
// PANNS::idi d_start,
// PANNS::idi q_start,
const dataf vertex_norm) const;
// idi dimension)
static idi insert_into_queue(
std::vector<Candidate> &c_queue,
idi c_queue_top,
Candidate cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
idi &queue_top,
const idi queue_size,
const PANNS::Candidate &cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
const idi queue_start,
idi &queue_size,
const idi queue_capacity,
const PANNS::Candidate &cand);
static void add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_top, // The number of elements in queue, independent with queue_start
const idi queue_size); // The maximum capacity of queue, independent with queue_start.
static void insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue_base,
const idi insert_index,
const idi queue_start,
const idi queue_size);
// idi insert_into_queue_nsg(
// std::vector< Candidate > &c_queue,
// idi c_queue_top,
// Candidate cand);
static idi merge_two_queues_into_1st_queue_seq_fixed(
std::vector<Candidate> &queue1,
const idi queue1_start,
const idi queue1_size,
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
static void merge_two_queues_into_1st_queue_seq_incr(
std::vector<Candidate> &queue1,
const idi queue1_start,
idi &queue1_size, // The number of element in queue1, independent with queue1_start.
const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
idi merge_all_queues_para_list(
std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<idi> &local_queues_ends,
std::vector<Candidate> &set_L,
const idi L);
// idi merge_all_queues_para_array(
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// std::vector<Candidate> &set_L,
// const idi L);
idi merge_all_queues_para_array(
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
idi merge_all_queues_queue_base(
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi queue_base,
const int real_threads,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
void merge_two_consecutive_queues_in_place(
std::vector<Candidate> &two_queues,
const idi base_1,
// const idi &end_1,
const idi base_2,
const idi &length_2);
void merge_in_set_L(
std::vector<Candidate> &set_L,
const idi set_L_length,
const idi num_queues,
const idi local_queue_length);
distf selecting_top_L_seq(
std::vector<Candidate> &set_L,
const idi global_L,
// const idi local_L,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes);
void selecting_unchecked_top_M_seq(
const idi query_id,
const idi iter,
std::vector<Candidate> &set_L,
const std::vector<idi> &pointers_starts,
const idi value_M,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
const std::vector<idi> &local_queues_sizes,
std::vector<idi> &local_m_counts);
void gather_unchecked_top_M_seq(
const idi query_id,
const idi iter,
std::vector<Candidate> &set_L,
const std::vector<idi> &pointers_starts,
const idi value_M,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
const std::vector<idi> &local_queues_sizes,
std::vector<idi> &top_m_candidates,
idi &top_m_candidates_size,
std::vector<idi> &bound_subs);
// idi merge_all_queues_all_together_in_sequential(
// std::vector<Candidate> &set_L,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L);
// idi min_all_queues_at_heads(
// const std::vector<Candidate> &set_L,
// std::vector<idi> &queue_heads,
// const std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L);
public:
// For Profiling
// L3CacheMissRate cache_miss_kernel;
uint64_t count_distance_computation_ = 0;
// uint64_t count_add_to_queue_ = 0;
// uint64_t count_single_query_computation_ = 0;
// distf dist_min_ = 0;
// distf dist_max_ = 0;
double time_merge_ = 0;
double time_select_ = 0;
// double time_select_L_ = 0.0;
// double time_select_M_ = 0.0;
// double time_initialization_ = 0;
// double time_sequential_phase_ = 0;
// double time_parallel_phase_ = 0;
// double time_ending_ = 0.0;
// double time_assign_s_ = 0.0;
// double time_expand_ = 0.0;
// double time_pick_top_m_ = 0.0;
// double time_distance_computation_ = 0.0;
// double time_add_to_queue_ = 0.0;
// double time_insert_ = 0;
// double time_compare_minimum_ = 0;
// double time_memmove_ = 0;
// std::vector<double> time_memmove_list_;
// L3CacheMissRate profile_miss_rate;
// uint64_t number_local_elements_ = 0;
// std::vector<idi> L_ids_;
// std::vector<idi> M_ids_;
~Searching()
{
free(data_load_);
data_load_ = nullptr;
// free(queries_load_);
// _mm_free(data_load_);
free(queries_load_);
queries_load_ = nullptr;
// free(norms_);
// free(nsg_graph_indices_);
// free(nsg_graph_out_edges_);
free(opt_nsg_graph_);
opt_nsg_graph_ = nullptr;
}
void load_data_load(char *filename);
void load_queries_load(char *filename);
void load_nsg_graph(char *filename);
// void build_opt_graph();
void prepare_init_ids(
std::vector<unsigned> &init_ids,
const unsigned L) const;
// void prepare_candidate_queue_list(
// const float *query_load,
// std::vector<std::vector<efanna2e::Neighbor> > &retset_list,
// std::vector<boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<unsigned> &init_ids,
// const boost::dynamic_bitset<> &flags,
// unsigned batch_start,
// unsigned batch_size,
// unsigned L);
// void search_in_batch(
//// const float *query_load,
// size_t K,
// size_t L,
// unsigned batch_start,
// unsigned batch_size,
// std::vector< std::vector<Candidate> > &set_L_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<idi> &init_ids,
// const boost::dynamic_bitset<> &is_visited,
// std::vector<std::vector<idi> > &set_K_list);
void search_in_sequential(
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
// boost::dynamic_bitset<> &is_visited,
// boost::dynamic_bitset<> is_visited,
// std::vector<idi> &init_ids,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// void search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// idi get_out_degree(idi v_id) const
// {
// if (v_id < num_v_ - 1) {
// return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id];
// } else {
// return num_e_ - nsg_graph_indices_[v_id];
// }
// }
void search_with_top_m(
idi M,
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// std::vector< std::vector<idi> > &top_m_list);
void search_with_top_m_scale_m(
const PANNS::idi value_M_max,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
// void search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids);
// void search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited);
void search_with_top_m_in_batch(
PANNS::idi M,
PANNS::idi batch_start,
PANNS::idi batch_size,
PANNS::idi K,
PANNS::idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list);
// void para_search_with_top_m_critical_area(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_no_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_yes_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited);
// void para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_in_array(
// void para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_by_sort(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &dest_offsets,
// const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L.
// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v0(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v2(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_better_merge_v1(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
//// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v0_0(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_less_merge(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_no_merge(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds,
// const uint64_t computation_threshold);
// void para_search_with_top_m_merge_queues_scale_m_v0(
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited);
// std::vector<distf> &local_thresholds);
// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_scale_m_v2(
// const idi value_M_min,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_scale_m_v3(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m_no_merge(
const uint64_t computation_threshold,
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
const idi init_size,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_sequential_merge(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_nested_para(
const idi batch_start,
const idi batch_size,
const idi value_M_middle,
const idi value_M_max,
const idi K,
const idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
std::vector< std::vector<idi> > &top_m_candidates_list,
std::vector< boost::dynamic_bitset<> > &is_visited_list);
void subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation);
void subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation);
// void subsearch_top_m_for_one_iteration_lth(
// const distf bound_lth,
// const idi iter,
// idi &k_uc,
// const idi value_M,
// const idi query_id,
// const dataf *query_data,
// const idi L,
// std::vector<Candidate> &set_L,
// const idi set_L_start,
// idi &set_L_size,
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited,
// uint64_t &count_distance_computation);
void subsearch_top_m_for_one_iteration_lth_mth(
const distf bound_lth,
// const idi top_m_position,
const idi iter,
idi &k_uc,
const idi local_m_count,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation,
double &time_pick_top_m,
uint64_t &count_add_to_queue,
double &time_distance_computation,
double &time_add_to_queue);
// void para_search_with_top_m_subsearch_v3(
// const idi local_M_max,
// const idi local_M_middle,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
//// const idi total_L,
//// const idi init_queue_size,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_starts,
// std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_subsearch_v4(
const idi local_M_max,
const idi local_M_middle,
const idi query_id,
const idi K,
const idi global_L,
const idi local_L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void subsearch_for_simple_search(
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi base_set_L,
idi &set_L_end,
// std::vector<uint8_t> &is_visited,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation);
void para_simple_search_subsearch(
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited);
boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_distance_threshold_m(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_distance_threshold_m_middle_iteration(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi middle_iteration,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_myths(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
//// std::vector<uint8_t> &is_visited);
//// boost::dynamic_bitset<> &is_visited);
//// void para_prepare_init_ids(
//// std::vector<unsigned> &init_ids,
//// unsigned L) const;
// void para_search_with_top_m_in_batch_embarassing_para(
// const PANNS::idi M,
// const PANNS::idi batch_start,
// const PANNS::idi batch_size,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector< std::vector<Candidate> > &set_L_list,
// const std::vector<idi> &init_ids,
// std::vector< std::vector<idi> > &set_K_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list);
// void test_neighbors_distance_to_father(
// const idi num_selected) const;
// void test_neighbors_normalized_distance_to_father(
// const idi num_selected) const;
void load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list);
void get_recall_for_all_queries(
const std::vector< std::vector<idi> > &true_nn_list,
const std::vector<std::vector<unsigned>> &set_K_list,
std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching
/**
 * Load the base (data) vectors from file via DiskIO::load_data, filling
 * data_load_, num_v_, and dimension_.
 * If dimension_ was already set (e.g. by a previous query load), the newly
 * loaded dimension must match it; a mismatch is fatal.
 * @param filename path of the data file
 */
inline void Searching::load_data_load(char *filename)
{
    const auto previous_dimension = dimension_;
    DiskIO::load_data(
            filename,
            data_load_,
            num_v_,
            dimension_);
    // A zero previous dimension means nothing else was loaded before.
    if (previous_dimension && previous_dimension != dimension_) {
        std::cerr << "Error: data dimension " << dimension_
                  << " is not equal to query dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
 * Load the query vectors from file via DiskIO::load_data, filling
 * queries_load_, num_queries_, and dimension_.
 * If dimension_ was already set (e.g. by a previous data load), the newly
 * loaded dimension must match it; a mismatch is fatal.
 * @param filename path of the query file
 */
inline void Searching::load_queries_load(char *filename)
{
    const auto previous_dimension = dimension_;
    DiskIO::load_data(
            filename,
            queries_load_,
            num_queries_,
            dimension_);
    // A zero previous dimension means nothing else was loaded before.
    if (previous_dimension && previous_dimension != dimension_) {
        std::cerr << "Error: query dimension " << dimension_
                  << " is not equal to data dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
 * Load the NSG graph from file and build the cache-friendly combined
 * layout in opt_nsg_graph_: for each vertex, the bytes
 * [norm | data vector | out-degree | neighbor ids] are stored contiguously,
 * so one vertex's distance data and adjacency list share cache lines.
 * On success, data_load_ is freed (its contents were copied into the
 * combined layout) and num_e_ holds the total edge count.
 * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
 * @param filename path of the NSG index file
 */
inline void Searching::load_nsg_graph(char *filename)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
        exit(EXIT_FAILURE);
    }
    // File header: graph width (maximum out-degree) and entry-point id.
    fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
    fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));
    // Per-vertex layout sizes: (norm + data) block, then (degree + neighbors) block.
    data_bytes_ = (1 + dimension_) * sizeof(dataf);
    neighbor_bytes_ = (1 + width_) * sizeof(idi);
    vertex_bytes_ = data_bytes_ + neighbor_bytes_;
    opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
    if (!opt_nsg_graph_) {
        std::cerr << "Error: no enough memory for opt_nsg_graph_." << std::endl;
        exit(EXIT_FAILURE);
    }
    idi v_id = 0;
    num_e_ = 0;
    char *base_location = opt_nsg_graph_;
    while (true) {
        idi degree;
        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
        if (fin.eof()) {
            break;
        }
        num_e_ += degree;
        // Norm and data: precompute the vertex norm once so searches only
        // need the dot product (see compute_distance_with_norm).
        distf norm = compute_norm(data_load_ + v_id * dimension_);
        std::memcpy(base_location, &norm, sizeof(distf)); // Norm
        memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
        base_location += data_bytes_;
        // Neighbors: degree first, then the ids straight from the file.
        memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors
        fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
        base_location += neighbor_bytes_;
        ++v_id;
    }
    if (v_id != num_v_) {
        std::cerr << "Error: NSG data has " << v_id
                  << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
        exit(EXIT_FAILURE);
    }
    free(data_load_);
    data_load_ = nullptr;
}
/**
* Load those true top-K neighbors (ground truth) of queries
* @param filename
* @param[out] true_nn_list
*/
inline void Searching::load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list)
// unsigned &t_K)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
fprintf(stderr, "Error: cannot open file %s\n", filename);
exit(EXIT_FAILURE);
}
idi t_query_num;
idi t_K;
// unsigned t_K;
fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
// if (t_query_num != query_num) {
// fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n",
// query_num, t_query_num, filename);
// exit(EXIT_FAILURE);
// }
if (t_query_num < num_queries_) {
fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
exit(EXIT_FAILURE);
}
if (t_K < 100) {
fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
exit(EXIT_FAILURE);
}
// data = new unsigned[(size_t) t_query_num * (size_t) t_K];
true_nn_list.resize(t_query_num);
for (idi q_i = 0; q_i < t_query_num; ++q_i) {
true_nn_list[q_i].resize(t_K);
}
for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
// size_t offset = q_i * t_K;
for (unsigned n_i = 0; n_i < t_K; ++n_i) {
unsigned id;
float dist;
fin.read(reinterpret_cast<char *>(&id), sizeof(id));
fin.read(reinterpret_cast<char *>(&dist), sizeof(dist));
// data[offset + n_i] = id;
true_nn_list[q_i][n_i] = id;
}
}
fin.close();
}
/**
 * Compute recall@{1,5,10,20,50,100} over all queries.
 * For each query, every true top-100 neighbor found within the first c
 * returned results contributes to recalls[c]; each bucket is then
 * normalized by c * num_queries_.
 * @param true_nn_list ground-truth neighbor ids per query (>= 100 each)
 * @param set_K_list returned neighbor ids per query (>= 100 each)
 * @param[out] recalls map from cutoff c to recall@c
 */
inline void Searching::get_recall_for_all_queries(
        const std::vector< std::vector<idi> > &true_nn_list,
        const std::vector<std::vector<unsigned>> &set_K_list,
        std::unordered_map<unsigned, double> &recalls) const
{
    if (true_nn_list[0].size() < 100) {
        fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
        exit(EXIT_FAILURE);
    }
    static const unsigned cutoffs[] = {1, 5, 10, 20, 50, 100};
    for (unsigned c : cutoffs) {
        recalls[c] = 0.0;
    }
    for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
        for (unsigned t_i = 0; t_i < 100; ++t_i) {
            const unsigned gt_id = true_nn_list[q_i][t_i];
            // Linear scan of the first 100 results for this ground-truth id.
            for (unsigned r_i = 0; r_i < 100; ++r_i) {
                if (set_K_list[q_i][r_i] != gt_id) {
                    continue;
                }
                // A hit at rank r_i counts toward every cutoff beyond it.
                for (unsigned c : cutoffs) {
                    if (r_i < c) {
                        recalls[c] += 1;
                    }
                }
            }
        }
    }
    for (unsigned c : cutoffs) {
        recalls[c] /= 1.0 * c * num_queries_;
    }
}
/**
 * Sequential (single-threaded) best-first graph search for one query.
 * Seeds the candidate queue set_L with the L init_ids, then repeatedly
 * expands the best unchecked candidate, inserting improving neighbors
 * back into the sorted queue, until every candidate in the top-L is
 * checked. The ids of the best K candidates are written to set_K.
 * Side effect: increments count_distance_computation_ per distance.
 * @param query_id index of the query in queries_load_
 * @param K number of results written to set_K
 * @param L search width (queue length); requires K <= L
 * @param set_L candidate queue, capacity >= L (working storage)
 * @param init_ids L seed vertex ids (see prepare_init_ids)
 * @param[out] set_K ids of the K best candidates found
 */
inline void Searching::search_in_sequential(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    {//test
//        printf("Iteration: Relative_Distance:\n");
////        printf("Iteration: Relative_Distance:\n");
////        printf("----query: %u----\n", query_id);
//    }
    // Mark all seeds visited up front so they are never re-inserted.
    boost::dynamic_bitset<> is_visited(num_v_);

    for (idi v_i = 0; v_i < L; ++v_i) {
        is_visited[init_ids[v_i]] = true;
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with each seed's combined vertex record.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout: [norm | data]; v_data points at the norm first.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    idi k = 0; // Index of every queue's first unchecked candidate.
    idi tmp_count = 0; // for debug
//    {// Print relative distance
////        distf top_dist = set_L[0].distance_;
//        for (idi i_l = 0; i_l < L; ++i_l) {
//            printf("%u %f\n",
//                    tmp_count, set_L[i_l].distance_);
////                    tmp_count, set_L[i_l].distance_ - top_dist);
//        }
//    }
    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L; // lowest queue position that changed in this expansion
        if (!top_cand.is_checked_) {
            ++tmp_count;
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list sits right after the (norm + data) block.
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Worse than the current L-th best: cannot enter the queue.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
//            {// Print relative distance
////                distf top_dist = set_L[0].distance_;
//                for (idi i_l = 0; i_l < L; ++i_l) {
//                    printf("%u %f\n",
//                            tmp_count, set_L[i_l].distance_);
////                            tmp_count, set_L[i_l].distance_ - top_dist);
//                }
//            }
        }
        // Back-jump to the earliest changed position; otherwise move on.
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }
//    cache_miss_kernel.measure_stop();
    // Emit the ids of the K best candidates.
    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
//    {//test
//        if (0 == query_id) {
//            exit(1);
//        }
//    }
}
//inline void Searching::search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
// BitVector is_visited(num_v_);
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
//// is_visited[init_ids[v_i]] = true;
// is_visited.atomic_set_bit(init_ids[v_i]);
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//// cache_miss_kernel.measure_stop();
//#pragma omp parallel for
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/**
* Prepare init_ids and flags, as they are constant for all queries.
* @param[out] init_ids
* @param L
*/
inline void Searching::prepare_init_ids(
std::vector<unsigned int> &init_ids,
const unsigned L) const
{
// idi num_ngbrs = get_out_degree(ep_);
// edgei edge_start = nsg_graph_indices_[ep_];
// // Store ep_'s neighbors as candidates
// idi tmp_l = 0;
// for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
// init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
// }
// std::unordered_set<idi> visited_ids;
boost::dynamic_bitset<> is_selected(num_v_);
idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
idi init_ids_end = 0;
// for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
// idi v_id = out_edges[tmp_l];
idi v_id = out_edges[e_i];
if(is_selected[v_id]) {
continue;
}
is_selected[v_id] = true;
// init_ids[tmp_l] = v_id;
init_ids[init_ids_end++] = v_id;
// init_ids[tmp_l] = out_edges[tmp_l];
// visited_ids.insert(init_ids[tmp_l]);
}
// for (idi i = 0; i < tmp_l; ++i) {
// is_visited[init_ids[i]] = true;
// }
// If ep_'s neighbors are not enough, add other random vertices
idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
while (init_ids_end < L) {
tmp_id %= num_v_;
idi v_id = tmp_id++;
if (is_selected[v_id]) {
continue;
}
// if (visited_ids.find(id) != visited_ids.end()) {
// continue;
// }
is_selected[v_id] = true;
// visited_ids.insert(id);
init_ids[init_ids_end++] = v_id;
// tmp_l++;
}
}
// TODO: re-code in AVX-512
/**
 * Compute the squared L2 norm of a vector using AVX2 (8-wide FMA-less
 * multiply/accumulate), 16 floats per loop iteration plus one 8-float
 * remainder block.
 * NOTE(review): D rounds dimension_ up to the next multiple of 8, so the
 * remainder load at e_l can read up to 7 floats past dimension_ --
 * assumes vectors are allocated with padding (and padded with zeros so
 * the extra lanes do not change the sum). TODO confirm against the
 * allocation in load_nsg_graph / DiskIO.
 * @param data pointer to at least D floats (see padding note above)
 * @return sum of squares of the first D floats
 */
inline dataf Searching::compute_norm(
        const dataf *data) const
//        idi vertex_id)
//        const std::vector<PANNS::dataf> &data)
//        size_t loc_start,
//        idi dimension)
{
//    const dataf *a = data.data() + loc_start;
//    const dataf *a = data_load_ + vertex_id * dimension_;
//    idi size = dimension_;
    dataf result = 0;
//#define AVX_L2NORM(addr, dest, tmp) \
//    tmp = _mm256_load_ps(addr); \
//    tmp = _mm256_mul_ps(tmp, tmp); \
//    dest = _mm256_add_ps(dest, tmp);
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);
    __m256 sum;
    __m256 l0, l1;
    // D: dimension rounded up to a multiple of 8; DR: 8-float remainder
    // beyond a multiple of 16; DD: the 16-float-aligned main span.
    unsigned D = (dimension_ + 7) & ~7U;
    unsigned DR = D % 16;
    unsigned DD = D - DR;
    const float *l = data;
    const float *e_l = l + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    // Remainder block first, then the 16-float-per-iteration main loop.
    if (DR) { AVX_L2NORM(e_l, sum, l0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    // Horizontal reduction of the 8 accumulator lanes.
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    return result;
}
/**
 * Compute a ranking-equivalent squared Euclidean distance between a
 * vertex and a query using AVX2: returns -2 * dot(v, q) + |v|^2.
 * Since |v - q|^2 = |v|^2 - 2 v.q + |q|^2 and |q|^2 is constant for a
 * given query, the omitted query norm does not affect candidate order.
 * NOTE(review): like compute_norm, the remainder block at e_l/e_r may
 * read up to 7 floats past dimension_ -- assumes padded allocations.
 * @param v_data vertex vector (the norm word has already been consumed)
 * @param q_data query vector
 * @param vertex_norm precomputed |v|^2 (stored in opt_nsg_graph_)
 * @return -2 * dot(v_data, q_data) + vertex_norm
 */
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<PANNS::dataf> &d_data,
//        const std::vector<PANNS::dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
        const dataf vertex_norm) const
//        idi dimension)
{
//    idi size = dimension_;
    float result = 0;
//#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
//    tmp1 = _mm256_load_ps(addr1);\
//    tmp2 = _mm256_load_ps(addr2);\
//    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
//    dest = _mm256_add_ps(dest, tmp1);
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
    tmp1 = _mm256_loadu_ps(addr1);\
    tmp2 = _mm256_loadu_ps(addr2);\
    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
    dest = _mm256_add_ps(dest, tmp1);
    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    // Same blocking as compute_norm: 16-float main loop + 8-float remainder.
    unsigned D = (dimension_ + 7) & ~7U;
    unsigned DR = D % 16;
    unsigned DD = D - DR;
    const float *l = v_data;
    const float *r = q_data;
//    const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf));
//    const float *r = queries_load_ + query_id * dimension_;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
        AVX_DOT(l, r, sum, l0, r0);
        AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    // Horizontal reduction of the 8 accumulator lanes.
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    result = -2 * result + vertex_norm;
    return result;
}
//// DEPRECATED.
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
//inline idi Searching::add_into_queue(
// std::vector<PANNS::Candidate> &queue,
// idi &queue_top,
// const idi queue_size,
// const PANNS::Candidate &cand)
//{
// assert(queue_size > 1);
// if (0 == queue_top) {
// queue[queue_top++] = cand;
// return 0;
// } else if (1 == queue_top) {
// if (queue[0] < cand) {
// queue[queue_top++] = cand;
// return 1;
// } else {
// queue[++queue_top] = queue[0];
// queue[0] = cand;
// return 0;
// }
// }
//
// if (queue[queue_top - 1] < cand) {
// if (queue_top < queue_size) {
// queue[queue_top++] = cand;
// }
// return queue_top;
// }
//
// idi r = insert_into_queue(
// queue,
// queue_top - 1,
// cand);
//// {//test
//// printf("r: %u"
//// "queue_top: %u "
//// "queue_size: %u\n",
//// r,
//// queue_top,
//// queue_size);
//// }
// return r;
//
//// /////////////////////////////////////////////////////////////
//// // Find the insert location
//// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
//// idi insert_loc = it_loc - queue.begin();
//// if (insert_loc == queue_size) {
//// return queue_size;
//// }
////
//// // Insert
////// if (queue_top == queue_size) {
////// // If full already
////// --queue_top;
////// }
//// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
//// reinterpret_cast<char *>(queue.data() + insert_loc),
//// (queue_top - insert_loc) * sizeof(Candidate));
////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) {
////// queue.at(q_i) = queue.at(q_i - 1);
////// }
//// queue[insert_loc] = cand;
//// ++queue_top;
//// return insert_loc;
//}
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
/**
 * Insert cand into the sorted prefix queue[0, queue_top), growing
 * queue_top by one unless the queue is full, in which case the tail
 * element is dropped (or cand itself is rejected if it is worst of all).
 * @param queue backing array, capacity >= queue_size
 * @param queue_top current number of elements (updated on insert)
 * @param queue_size maximum number of elements
 * @param cand candidate to insert
 * @return insertion position, or queue_size if cand was rejected
 */
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        idi &queue_top,
        const idi queue_size,
        const PANNS::Candidate &cand)
{
    // Empty queue: cand becomes the first element.
    if (queue_top == 0) {
        queue[queue_top++] = cand;
        return 0;
    }
    // Binary-search the sorted prefix for the insertion point.
    const auto first = queue.begin();
    const idi pos = std::lower_bound(first, first + queue_top, cand) - first;
    if (pos == queue_size) {
        // Full queue and cand is worse than everything in it: drop cand.
        return queue_size;
    }
    if (queue_top == queue_size) {
        // Full queue: the current worst element falls off the end.
        --queue_top;
    }
    // Shift [pos, queue_top) one slot right; Candidate is moved with
    // memmove, matching the trivially-copyable assumption used file-wide.
    memmove(reinterpret_cast<char *>(queue.data() + pos + 1),
            reinterpret_cast<char *>(queue.data() + pos),
            (queue_top - pos) * sizeof(Candidate));
    queue[pos] = cand;
    ++queue_top;
    return pos;
}
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
// add_into_queue with a queue_start
/**
 * Insert cand into the sorted logical queue occupying
 * queue[queue_start, queue_start + queue_size), keeping it sorted and
 * capped at queue_capacity elements (the tail is dropped when full).
 * @param queue backing array shared by several logical queues
 * @param queue_start offset of this logical queue in the backing array
 * @param queue_size current element count of this logical queue (updated)
 * @param queue_capacity maximum element count for this logical queue
 * @param cand candidate to insert
 * @return insertion position relative to queue_start, or queue_capacity
 *         if cand was rejected (duplicate id, or the queue is full and
 *         cand is worse than its tail)
 * NOTE(review): the duplicate check only compares cand.id_ against the
 * single element at the lower_bound position -- presumably candidates
 * with equal distance but different ids are rare enough; verify if exact
 * de-duplication is required.
 */
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        const idi queue_start,
        idi &queue_size, // The insertion location starting from queue_start
        const idi queue_capacity, // The maximum capacity of queue, independent with queue_start.
        const PANNS::Candidate &cand)
{
    if (0 == queue_size) {
        queue[queue_start + queue_size++] = cand;
        return 0;
    }
    idi queue_end = queue_start + queue_size;
    // Find the insert location
    const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand);
    idi insert_loc = it_loc - queue.begin();
    if (insert_loc != queue_end) {
        if (cand.id_ == it_loc->id_) {
            // Duplicate
            return queue_capacity;
        }
        if (queue_size >= queue_capacity) { // Queue is full
            // Drop the tail element to make room for cand.
            --queue_size;
            --queue_end;
        }
    } else { // insert_loc == queue_end, insert at the end?
        if (queue_size < queue_capacity) { // Queue is not full
            // Insert at the end
            queue[insert_loc] = cand;
            ++queue_size;
            return queue_size - 1;
        } else { // Queue is full
            return queue_capacity;
        }
    }
    // Add into queue
    // Shift [insert_loc, queue_end) one slot right via memmove
    // (Candidate treated as trivially copyable, as elsewhere in this file).
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_end - insert_loc) * sizeof(Candidate));
    queue[insert_loc] = cand;
    ++queue_size;
    return insert_loc - queue_start;
}
inline void Searching::add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_size, // The number of elements in queue, independent with queue_start
const idi queue_length) // The maximum capacity of queue, independent with queue_start.
{
const idi dest_index = queue_start + insert_index;
if (queue_size == queue_length) {
--queue_size;
}
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index) * sizeof(Candidate));
queue[dest_index] = cand;
++queue_size;
}
inline void Searching::insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index,
const idi queue_start,
const idi queue_size)
{
const idi dest_index = queue_start + insert_index;
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index - 1) * sizeof(Candidate));
queue[dest_index] = cand;
// memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
// reinterpret_cast<char *>(queue_base + dest_index),
// (queue_size - insert_index - 1) * sizeof(T));
// for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
// queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
// }
// queue_base[dest_index] = cand;
}
/**
 * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move.
 * Inserts cand into the sorted fixed-size queue c_queue[0, c_queue_top),
 * ordering by distance_ with id_ as tie-breaker. The shift pushes the old
 * tail off the end; c_queue_top itself is passed by value and not changed.
 * @param[out] c_queue
 * @param c_queue_top  number of valid elements currently in c_queue
 * @param cand         candidate to insert
 * @return insertion index, or c_queue_top when cand was not inserted
 *
 * NOTE(review): callers are expected to pre-filter cand so that
 * cand.distance_ <= c_queue[c_queue_top - 1].distance_ (the commented-out
 * callers in this file do exactly that); otherwise the final write with
 * left == c_queue_top touches one slot past the current top — confirm
 * capacity at any new call site.
 */
inline idi Searching::insert_into_queue(
        std::vector<PANNS::Candidate> &c_queue,
        PANNS::idi c_queue_top,
        PANNS::Candidate cand)
{
    if (c_queue[0].distance_ > cand.distance_) {
        // cand belongs at the head: shift everything right and place it first.
        memmove(reinterpret_cast<char *>(c_queue.data() + 1),
                reinterpret_cast<char *>(c_queue.data()),
                c_queue_top * sizeof(Candidate));
        c_queue[0] = cand;
        return 0;
    } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) {
        // cand ties with the last element.
        if (c_queue[c_queue_top - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            c_queue[c_queue_top - 1] = cand;
            return c_queue_top - 1;
        } else {
            return c_queue_top;
        }
    }
    // Binary search: `left` ends at the first element with distance_ > cand's.
    idi left = 0;
    idi right = c_queue_top;
    while (left < right) {
        idi mid = (right - left) / 2 + left;
        if (c_queue[mid].distance_ > cand.distance_) {
            right = mid;
        } else {
            left = mid + 1;
        }
    }
    // If the distance is the same, back up over equal-distance elements whose
    // id_ is larger, so the (distance_, id_) ordering is preserved.
    if (0 != left && c_queue[left - 1].distance_ != cand.distance_) {
        ;
    } else {
        while (0 != left
               && c_queue[left - 1].distance_ == cand.distance_
               && c_queue[left - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            --left;
        }
    }
    // Insert to left: shift the tail one slot right and place cand.
    memmove(reinterpret_cast<char *>(c_queue.data() + left + 1),
            reinterpret_cast<char *>(c_queue.data() + left),
            (c_queue_top - left) * sizeof(Candidate));
    c_queue[left] = cand;
    return left;
}
//inline void Searching::cand_pushes_ngbrs_into_queue(
// idi cand_id,
// const dataf *query_data,
// idi L,
// idi &new_k,
// boost::dynamic_bitset<> &is_visited,
// std::vector<Candidate> &set_L)
//{
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
//}
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
// Deprecated: cannot use std::set, because its element is constant.
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
//// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// std::set<Candidate> set_L;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// set_L.emplace(v_id, dist, false);
// }
//// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
//// Candidate &top_cand = set_L[k];
// std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k);
// unsigned nk = L;
// if (!top_cand->is_checked_) {
// top_cand->is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/* Function:
 * queue1_size is fixed.
 * Merge sorted queue2 into sorted fixed-size queue1: each queue2 element that
 * ranks within queue1's current size is inserted (worst queue1 elements fall
 * off the end); queue2 elements beyond queue1's capacity are dropped.
 * Elements with equal ordering whose id_ matches are treated as duplicates
 * and skipped. Returns the index (relative to queue1_start) where queue2's
 * head landed — the highest position touched.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // Even queue2's best element ranks below queue1's tail: nothing to do.
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        // Only the very last slot is affected; overwrite it directly.
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }
    // Insert the 1st of queue2
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        insert_one_element_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size);
    }
    if (queue2_size == 1) {
        return insert_index;
    }
    // Merge the rest of queue2, walking both queues in lockstep.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
//    const idi insert_i_bound = queue1_start + limit_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // queue1 or queue2 finished traverse. The rest of queue2 cannot
            // rank within queue1's fixed size, so it is dropped.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            // Insert queue2[q_i_2] into queue1
            insert_one_element_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            ++q_i_1;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}
/* Function:
 * queue1_size should be updated.
 * queue1_length should be provided.
 * Merge sorted queue2 into sorted queue1, growing queue1 up to queue1_length
 * elements; queue1_size is updated in place. queue2 elements that cannot fit
 * within queue1_length are dropped; id_-equal elements are skipped as
 * duplicates.
 */
inline void Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of element in queue1, independent with queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // All of queue2 ranks after queue1's tail: bulk-append as many
        // elements as capacity allows.
        // NOTE(review): assumes the two ranges do not overlap even when
        // queue1 and queue2 alias the same vector (callers pass disjoint
        // sub-queues of set_L) — confirm for any new call site.
        idi copy_count = (queue1_size + queue2_size > queue1_length) ?
                         queue1_length - queue1_size :
                         queue2_size;
        memmove(queue1.data() + queue1_start + queue1_size,
                queue2.data() + queue2_start,
                copy_count * sizeof(Candidate));
        queue1_size += copy_count;
        return;
    }
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        add_into_queue_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size,
                queue1_length);
    }
    if (queue2_size == 1) {
        return;
    }
    // Merge the remaining elements of queue2, walking both queues in lockstep.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
    const idi q_i_2_bound = queue2_start + queue2_size;
//    idi insert_i;
    for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
        if (q_i_1 >= q_i_1_bound) {
            // queue1 exhausted: append the rest of queue2 up to capacity.
            queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
            for ( ; insert_i < queue1_size; ++insert_i) {
                queue1[queue1_start + insert_i] = queue2[q_i_2++];
            }
            break;
        } else if (q_i_2 >= q_i_2_bound) {
            // queue2 exhausted: done.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            add_into_queue_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size,
                    queue1_length);
            ++q_i_1;
            // queue1 may have grown; refresh the bound.
            q_i_1_bound = queue1_start + queue1_size;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
}
// Merge all threads' local candidate queues into set_L: a parallel
// binary-tree reduction (queues 2^d apart are pairwise merged at level d),
// a sequential prefix-like pass for the non-power-of-two remainder, and
// finally a merge of the accumulated last queue into the global set_L.
// All local_queues_ends are reset to 0 on return.
// Returns the lowest index in set_L that received a new element (L if none),
// which callers use to reposition their scan cursor.
inline idi Searching::merge_all_queues_para_list(
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
        std::vector<Candidate> &set_L,
        const idi L)
{
    // Merge local queue bi into local queue ai, leaving the result (capped at
    // L elements, capacity padded to L) in ai and emptying bi. Factored into
    // a lambda because the tree phase and the remainder phase previously
    // duplicated these ~30 lines verbatim.
    auto merge_local_pair = [&](idi ai, idi bi) {
        if (0 == local_queues_ends[bi]) {
            // Nothing to merge from bi.
            return;
        }
        if (local_queues_ends[ai] == 0) {
            // ai is empty: just steal bi's storage.
            local_queues_list[ai].swap(local_queues_list[bi]);
            std::swap(local_queues_ends[ai], local_queues_ends[bi]);
            return;
        }
        idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
        std::vector<Candidate> tmp_queue(tmp_length);
        std::merge(
                local_queues_list[ai].begin(),
                local_queues_list[ai].begin() + local_queues_ends[ai],
                local_queues_list[bi].begin(),
                local_queues_list[bi].begin() + local_queues_ends[bi],
                tmp_queue.begin());
        if (tmp_length > L) {
            // Keep only the best L candidates.
            tmp_queue.resize(L);
            tmp_length = L;
        } else if (tmp_length < L) {
            // Pad capacity to L so later merges have room to grow.
            tmp_queue.resize(L);
        }
        local_queues_list[ai].swap(tmp_queue);
        local_queues_ends[ai] = tmp_length;
    };
    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            merge_local_pair(ai, bi);
        }
    }
    // Remain, prefix-sum-like merge for queues beyond the largest power of two.
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            merge_local_pair(i, i - 1);
        }
    }
    // Merge the accumulated last queue into the global set_L (fixed length L).
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq_fixed(
                set_L,
                0,
                L,
                local_queues_list[num_threads_ - 1],
                0,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return r;
}
/* Function:
 * Use large local_queues_array as a concatenation of all queues
 */
// Merge all per-thread queues, laid out consecutively inside set_L with
// stride local_queue_length, into the last queue (the global one, of fixed
// length L) via a parallel binary-tree reduction plus a sequential
// prefix-like remainder pass. All local ends except the global queue's are
// reset to 0 (kept for the Collector/Selecting ideas, see below).
// Returns the lowest global-queue index that received a new element
// (L if none changed).
inline idi Searching::merge_all_queues_para_array(
        std::vector<Candidate> &set_L,
        std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        const idi L)
{
    const int num_queues = num_threads_;
    idi nk = L;
    // Merge queue bi into queue ai. The global queue (last index) is merged
    // with fixed length L and records the highest insert position in nk.
    // Factored into a lambda because the tree phase and the remainder phase
    // previously duplicated this code verbatim. nk is only written by the
    // single pair whose ai is the global queue, so the capture is race-free
    // inside the parallel loop (as in the original).
    auto merge_pair = [&](idi ai, idi bi) {
        idi a_start = ai * local_queue_length;
        idi b_start = bi * local_queue_length;
        if (0 == local_queues_ends[bi]) {
            return;
        }
        if (local_queues_ends[ai] == 0) {
            std::copy(set_L.begin() + b_start,
                    set_L.begin() + b_start + local_queues_ends[bi],
                    set_L.begin() + a_start); // Copy bi to ai
            local_queues_ends[ai] = local_queues_ends[bi];
            local_queues_ends[bi] = 0;
            return;
        }
        if (ai != static_cast<idi>(num_queues - 1)) {
            merge_two_queues_into_1st_queue_seq_incr(
                    set_L,
                    a_start,
                    local_queues_ends[ai],
                    local_queue_length,
                    set_L,
                    b_start,
                    local_queues_ends[bi]);
        } else {
            idi r = merge_two_queues_into_1st_queue_seq_fixed(
                    set_L,
                    a_start,
                    L,
                    set_L,
                    b_start,
                    local_queues_ends[bi]);
            if (r < nk) {
                nk = r;
            }
        }
    };
    int size = 1 << (static_cast<idi>(log2(num_queues)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            merge_pair(i + (1 << (d + 1)) - 1,  // i + 2^(d+1) - 1
                       i + (1 << d) - 1);       // i + 2^d - 1
        }
    }
    // Remain, prefix-sum-like merge
    if (size != num_queues) {
        for (int i = size; i < num_queues; ++i) {
            merge_pair(i, i - 1);
        }
    }
    // Reset local_queues_ends
    // Not do this for Collector Idea or Selecting Idea
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
//    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return nk;
//    return r;
}
/* Function:
 * When merge all queues (in an array, and [num_threads_ - 1] is the global queue),
 * the starting local is at [queue_base]
 * Same tree + remainder reduction as merge_all_queues_para_array, but only
 * the queues [queue_base, queue_base + real_threads) take part in the tree
 * phase, and the parallel region is limited to real_threads threads.
 * Returns the lowest global-queue index that received a new element
 * (L if none changed).
 */
inline idi Searching::merge_all_queues_queue_base(
//        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<Candidate> &set_L,
//        std::vector<Candidate> &local_queues_array,
        std::vector<idi> &local_queues_ends,
        const idi queue_base,
        const int real_threads,
        const idi local_queue_length,
//        std::vector<Candidate> &set_L,
        const idi L)
{
    idi nk = L;
    // Tree phase over the largest power-of-two subset of real_threads queues.
    int size = 1 << (static_cast<idi>(log2(real_threads)));
//    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        idi by = 1 << (d + 1);
        idi i_bound = size + queue_base;
#pragma omp parallel for num_threads(real_threads)
        for (idi i = queue_base; i < i_bound; i += by) {
//        for (int i = 0; i < size; i += by) {
//            idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi a_start = ai * local_queue_length;
//            idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                // Nothing to merge from bi.
                continue;
            }
            if (local_queues_ends[ai] == 0) {
//                local_queues_list[ai].swap(local_queues_list[bi]);
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                // Ordinary local queue: growing merge up to capacity.
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                // Global queue: fixed length L; track highest insert position.
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Remain, prefix-sum-like merge
    if (size != real_threads) {
//    if (size != num_threads_) {
        for (int i = size + queue_base; i < num_threads_; ++i) {
//        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi a_start = ai * local_queue_length;
            idi bi = i - 1;
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Reset local_queues_ends
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
//    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return nk;
//    return r;
}
// In-place merge of two adjacent sorted runs inside `two_queues`:
// run 1 starts at base_1 and ends where run 2 begins (base_2);
// run 2 occupies [base_2, base_2 + length_2).
// Run-2 elements that rank before the current run-1 element are shifted in
// front of it, keeping the combined region sorted without extra storage.
inline void Searching::merge_two_consecutive_queues_in_place(
        std::vector<Candidate> &two_queues,
        const idi base_1,
//        const idi &end_1,
        const idi base_2,
        const idi &length_2)
{
//    idi tid = omp_get_thread_num();
    idi index_1 = base_1;
    idi index_2 = base_2;
    const idi bound_2 = base_2 + length_2;
    while (index_1 < index_2
           && index_2 < bound_2) {
        Candidate e_1 = two_queues[index_1];
        Candidate e_2 = two_queues[index_2];
        if (e_1 < e_2) {
            ++index_1;
        } else if (e_2 < e_1) {
            // e_2 ranks first: shift [index_1, index_2) one slot right and
            // drop e_2 into the gap. The element shifted over index_2's slot
            // is exactly the e_2 we saved, so nothing is lost.
//            time_memmove_list_[tid] -= WallTimer::get_time_mark();
            std::memmove(two_queues.data() + index_1 + 1,
                    two_queues.data() + index_1,
                    (index_2 - index_1) * sizeof(Candidate));
//            time_memmove_list_[tid] += WallTimer::get_time_mark();
            two_queues[index_1] = e_2;
            ++index_1;
            ++index_2;
        } else { // Duplicate, but have no idea what to do right now
            // NOTE(review): both copies of the duplicate remain in the data;
            // the shift plus index_1 += 2 merely advances past both.
//            time_memmove_list_[tid] -= WallTimer::get_time_mark();
            std::memmove(two_queues.data() + index_1 + 1,
                    two_queues.data() + index_1,
                    (index_2 - index_1) * sizeof(Candidate));
//            time_memmove_list_[tid] += WallTimer::get_time_mark();
            index_1 += 2;
            ++index_2;
        }
    }
}
///* Function:
// * Merge all queues to the global queue, in a two-queue-merge way
// */
//inline idi Searching::merge_all_queues_all_together_in_sequential(
// std::vector<Candidate> &set_L,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L)
//{
// const idi num_queues = num_threads_;
// const idi global_queue_base = (num_queues - 1) * local_queue_length;
// std::vector<idi> queue_heads(num_queues, 0);
// idi queue_id_min;
//
//// bool is_finished = false;
// bool is_1st_selected = true;
// idi nk = L; // The highest location of insertion.
// {
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// if (0 == local_queues_ends[q_i]) {
// continue;
// }
// _mm_prefetch(set_L.data() + q_i * local_queue_length, _MM_HINT_T0);
// }
// }
// while (queue_heads[num_queues - 1] < L) {
//// time_compare_minimum_ -= WallTimer::get_time_mark();
// queue_id_min = min_all_queues_at_heads(
// set_L,
// queue_heads,
// local_queues_ends,
// local_queue_length,
// L);
//// time_compare_minimum_ += WallTimer::get_time_mark();
// if (queue_id_min != num_queues - 1) { // Not in the global queue
//// time_insert_ -= WallTimer::get_time_mark();
// insert_one_element_at(
// set_L[queue_heads[queue_id_min] + queue_id_min * local_queue_length],
// set_L,
// queue_heads[num_queues - 1],
// global_queue_base,
// L);
//// time_insert_ += WallTimer::get_time_mark();
// if (is_1st_selected) { // Get the highest inserting location
// is_1st_selected = false;
// nk = queue_heads[num_queues - 1];
// }
// ++queue_heads[queue_id_min];
// }
// ++queue_heads[num_queues - 1];
// }
//
// // Reset local_queues_ends
// std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
// return nk;
//}
///* Function:
// * Find the minimum among queues at their head locations
// */
//inline idi Searching::min_all_queues_at_heads(
// const std::vector<Candidate> &set_L,
// std::vector<idi> &queue_heads,
// const std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L)
//{
// const idi num_queues = num_threads_;
// idi min_queue_id = num_queues - 1;
// Candidate min_candidate = set_L[queue_heads[min_queue_id] + min_queue_id * local_queue_length];
//
// for (idi q_i = 0; q_i < num_queues - 1; ++q_i) {
// if (queue_heads[q_i] >= local_queues_ends[q_i]) { // q_i finished
// continue;
// }
// const Candidate &ele = set_L[queue_heads[q_i] + q_i * local_queue_length];
// if (ele < min_candidate) {
// min_candidate = ele;
// min_queue_id = q_i;
// } else if (ele.id_ == min_candidate.id_) { // Redundant element
// ++queue_heads[q_i];
// }
// }
//
// return min_queue_id;
//}
// Parallel in-place merge of num_queues consecutive sorted runs (each of
// length local_queue_length, the last possibly shorter) stored inside set_L:
// a binary-tree phase pairs runs 2^d apart at level d, then any
// non-power-of-two remainder runs are folded into run 0 sequentially.
inline void Searching::merge_in_set_L(
        std::vector<Candidate> &set_L,
        const idi set_L_length,
        const idi num_queues,
        const idi local_queue_length)
{
    idi size = 1 << (static_cast<idi>(log2(num_queues)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        // At level d every run has grown to local_queue_length * 2^d elements.
        const idi merge_length = (local_queue_length << d);
        idi by = 1 << (d + 1);
        // Parallel for
#pragma omp parallel for
        for (idi i = 0; i < size; i += by) {
//            idi a = i + (1 << d) - 1;
//            idi b = i + (1 << (d + 1)) - 1;
            idi a = i;
            idi b = i + (1 << d);
            idi base_a = a * local_queue_length;
            idi base_b = b * local_queue_length;
            if (base_a >= set_L_length || base_b >= set_L_length) {
                // One of the runs lies past the end of set_L: nothing to merge.
                continue;
            }
            idi length_b;
            if (a + by < size) {
                length_b = merge_length;
            } else { // The last one
                if (size == num_queues) {
                    // b's run extends to the end of set_L.
                    length_b = set_L_length - base_b;
                } else {
                    length_b = merge_length;
                }
            }
//            printf("a: %u b: %u "
//                   "base_a: %u base_b: %u length_b: %u\n",
//                   a, b,
//                   base_a, base_b, length_b);
            merge_two_consecutive_queues_in_place(
                    set_L,
                    base_a,
                    base_b,
                    length_b);
        }
    }
    // Fold the remaining runs (beyond the largest power of two) into run 0.
    if (size != num_queues) {
        for (idi i = size; i < num_queues; ++i) {
            idi a = 0;
            idi b = i;
            idi base_a = a;
            idi base_b = b * local_queue_length;
            if (base_b >= set_L_length) {
                continue;
            }
            idi length_b;
            if (b != num_queues - 1) {
                length_b = local_queue_length;
            } else {
                // The final run may be shorter than local_queue_length.
                length_b = set_L_length - base_b;
            }
//            printf("a: %u b: %u "
//                   "base_a: %u base_b: %u length_b: %u\n",
//                   a, b,
//                   base_a, base_b, length_b);
            merge_two_consecutive_queues_in_place(
                    set_L,
                    base_a,
                    base_b,
                    length_b);
        }
    }
}
/*
 * 7/5/2020-20:27
 * Every queue keeps only elements which can be ordered in the top-L globally.
 * local_queues_lengths records the end location for all queues
 */
// Sequential multi-way selection: repeatedly pick the globally smallest head
// (ordered by distance, then id) among all queues until global_L elements
// are chosen. On return, local_queues_sizes[q] is truncated to the number of
// elements queue q contributed, and the distance of the last selected
// element is returned as the new bound (FLT_MAX if nothing was selected).
inline distf Searching::selecting_top_L_seq(
        std::vector<Candidate> &set_L,
        const idi global_L,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        std::vector<idi> &local_queues_sizes)
{
    std::vector<idi> pointers(num_queues, 0);
    // Fix: bound_lth was previously uninitialized; if the queues ran dry
    // before global_L elements were found (or global_L == 0), an
    // indeterminate value was returned. FLT_MAX means "no bound".
    distf bound_lth = FLT_MAX;
    idi rank = 0;
    while (rank < global_L) {
        // Fix: min_q_i/min_id were previously read before any assignment when
        // all remaining heads tied at FLT_MAX; an explicit 'found' flag makes
        // the first live head always win.
        bool found = false;
        distf min_dist = FLT_MAX;
        idi min_q_i = 0;
        idi min_id = 0;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            if (pointers[q_i] >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            idi sub = pointers[q_i] + local_queues_starts[q_i];
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            // Order by (distance, id); id breaks ties deterministically.
            if (!found
                || tmp_dist < min_dist
                || (tmp_dist == min_dist && tmp_id < min_id)) {
                found = true;
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (!found) {
            {//test
                printf("Error: selecting_top_L_seq: only found %u elements but global_L is %u.\n",
                       rank,
                       global_L);
            }
            break;
        }
        bound_lth = min_dist;
        ++pointers[min_q_i];
        ++rank;
    }
    // Truncate each queue to what it contributed to the global top-L.
    std::copy(pointers.begin(), pointers.end(), local_queues_sizes.begin());
    return bound_lth;
}
/*
 * 7/24/2020-10:08
 * Record for every queue the position that contains the top-M unchecked vertices.
 * So the total expanded vertices should still be M, which means the computation should
 * be the same with merging idea.
 */
// Sequential multi-way selection of the top value_M UNCHECKED candidates
// across all queues (ordered by distance, then id). Advances a private copy
// of the per-queue pointers past checked elements and counts, per queue, how
// many of the M winners it holds (local_m_counts).
// query_id and iter are kept for the interface; they were only used by
// commented-out debugging code.
inline void Searching::selecting_unchecked_top_M_seq(
        const idi query_id,
        const idi iter,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &pointers_starts,
        const idi value_M,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        const std::vector<idi> &local_queues_sizes,
        std::vector<idi> &local_m_counts)
{
    std::vector<idi> pointers(pointers_starts);
//    std::vector<idi> pointers(num_queues, 0);
    std::fill(local_m_counts.begin(), local_m_counts.end(), 0);
    idi rank = 0;
    while (rank < value_M) {
        // Fix: min_q_i/min_id were previously read before any assignment when
        // all remaining candidates tied at FLT_MAX (the tie comparison read
        // an uninitialized min_id — UB). The 'found' flag makes the first
        // live candidate always win.
        bool found = false;
        distf min_dist = FLT_MAX;
        idi min_q_i = 0;
        idi min_id = 0;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi &pointer = pointers[q_i];
            idi sub = pointer + local_queues_starts[q_i];
            // Skip already-checked candidates.
            while (pointer < local_queues_sizes[q_i]
                   && set_L[sub].is_checked_) {
                ++pointer;
                ++sub;
            }
            if (pointer >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            // Order by (distance, id); id breaks ties deterministically.
            if (!found
                || tmp_dist < min_dist
                || (tmp_dist == min_dist && tmp_id < min_id)) {
                found = true;
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (!found) {
            // Every queue is exhausted; fewer than value_M candidates exist.
            break;
        }
        ++pointers[min_q_i];
        ++rank;
        ++local_m_counts[min_q_i];
    }
//    std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin());
}
/*
 * 7/27/2020-15:41
 * Gather the top-M unchecked vertices from local queues.
 */
// Like selecting_unchecked_top_M_seq, but actually collects the winners:
// each selected candidate's id is appended to top_m_candidates (up to
// value_M of them) and the candidate is marked checked in set_L. The final
// per-queue scan positions are written to bound_subs.
// query_id and iter are kept for the interface; they are not used here.
inline void Searching::gather_unchecked_top_M_seq(
        const idi query_id,
        const idi iter,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &pointers_starts,
        const idi value_M,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        const std::vector<idi> &local_queues_sizes,
        std::vector<idi> &top_m_candidates,
        idi &top_m_candidates_size,
        std::vector<idi> &bound_subs)
{
    std::vector<idi> pointers(pointers_starts);
//    std::vector<idi> pointers(num_queues, 0);
    while (top_m_candidates_size < value_M) {
        // Fix: min_q_i/min_id were previously read before any assignment when
        // all remaining candidates tied at FLT_MAX (the tie comparison read
        // an uninitialized min_id — UB). The 'found' flag makes the first
        // live candidate always win.
        bool found = false;
        distf min_dist = FLT_MAX;
        idi min_q_i = 0;
        idi min_id = 0;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi &pointer = pointers[q_i];
            idi sub = pointer + local_queues_starts[q_i];
            // Skip already-checked candidates.
            while (pointer < local_queues_sizes[q_i]
                   && set_L[sub].is_checked_) {
                ++pointer;
                ++sub;
            }
            if (pointer >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            // Order by (distance, id); id breaks ties deterministically.
            if (!found
                || tmp_dist < min_dist
                || (tmp_dist == min_dist && tmp_id < min_id)) {
                found = true;
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (!found) {
            // Every queue is exhausted; fewer than value_M candidates exist.
            break;
        }
        idi sub = local_queues_starts[min_q_i] + pointers[min_q_i];
        top_m_candidates[top_m_candidates_size++] = set_L[sub].id_;
        set_L[sub].is_checked_ = true; // Checked
        ++pointers[min_q_i];
    }
//    std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin());
    // Report where each queue's scan stopped.
    std::copy(pointers.begin(), pointers.end(), bound_subs.begin());
}
// Sequential Top-M greedy search (NSG-style best-first search) for one query.
// Maintains a sorted candidate queue set_L of capacity L; each iteration
// expands up to M of the best unchecked candidates at once, inserting their
// unvisited neighbors back into the queue, until the first L entries are all
// checked. The IDs of the best K candidates are written to set_K.
// init_ids supplies the L starting vertices. Increments
// count_distance_computation_ once per distance evaluation.
inline void Searching::search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    // Visited bitmap over all vertices; allocated fresh per call.
    boost::dynamic_bitset<> is_visited(num_v_);
    {
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for the initial candidates' vertex records.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout: precomputed norm first, then the data vector.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        // nk tracks the highest (best) position any new candidate was inserted at
        // this iteration; L means "no insertion".
        unsigned nk = L;
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list follows the data bytes: first word is the out-degree.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            // Prefetch all neighbor records before computing distances.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Prune: worse than the current L-th best cannot enter the queue.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume scanning from the best newly-inserted position, or just past
        // the last candidate expanded if nothing better was inserted.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }
    // Emit the IDs of the K nearest candidates found.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
// Top-M greedy search with a geometrically growing M: starts with M = 1 and
// doubles it each iteration up to value_M_max, so early iterations expand
// cautiously and later ones expand in larger batches. Unlike
// search_with_top_m, the scratch buffers (top_m_candidates, is_visited) are
// caller-provided and reused across queries; is_visited is reset before
// returning. Writes the best K candidate IDs to set_K.
inline void Searching::search_with_top_m_scale_m(
        const PANNS::idi value_M_max,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
//    boost::dynamic_bitset<> is_visited(num_v_);
    {
        // Mark the initial candidates visited (bitset is expected clean on entry).
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for the initial candidates' vertex records.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout: precomputed norm first, then the data vector.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
//    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1;  // Current batch width; doubled each iteration up to value_M_max.
    while (k < L) {
        ++tmp_count;
        // nk tracks the best insertion position this iteration; L means none.
        unsigned nk = L;
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list follows the data bytes: first word is the out-degree.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Prune: worse than the current L-th best cannot enter the queue.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume from the best newly-inserted position, or past the last expanded.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        // Grow the batch width for the next iteration (capped at value_M_max).
        if (M < value_M_max) {
            M <<= 1;
        }
    }
    // Emit the IDs of the K nearest candidates found.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
        // Leave the shared bitset clean for the next query.
        is_visited.reset();
    }
}
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids)
//// std::vector<idi> &set_K)
//{
// dist_max_ = -FLT_MAX;
// dist_min_ = FLT_MAX;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// For histogram
// for (idi i_l = 0; i_l < L; ++i_l) {
// distf dist = set_L[i_l].distance_;
// {// For distance range
// if (dist > dist_max_) {
// dist_max_ = dist;
// }
// if (dist < dist_min_) {
// dist_min_ = dist;
// }
// }
// }
// }
// }
//
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i].id_;
//// }
//}
//
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
// const idi loc_range = L / 3;
//
//
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
//// {// For histogram
//// const distf dist_range = dist_max_ - dist_min_;
//// printf("iter:%u\n", 0);
//// for (idi i_l = 0; i_l < L; ++i_l) {
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
//// }
//// }
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// std::vector<idi> range_count(3, 0);
// idi zero_inserted_count = 0;
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// }
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//// {//test
//// printf("top_m_candidates_ends: %u\n", top_m_candidates_end);
//// }
// {
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//
// uint64_t count_neighbors = 0;
// uint64_t count_inserted = 0;
// std::vector<idi> locs_to_count(M);
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
//
// count_neighbors += out_degree;
// idi num_inserted = 0;
//
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// ++num_inserted;
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
//// {
//// printf("c_i: %u "
//// "count: %u "
//// "loc_inserted: %u\n",
//// c_i,
//// num_inserted,
//// r);
//// }
// if (r < nk) {
// nk = r;
// }
// {
// ++range_count[r / loc_range];
// }
// }
// {
// if (0 == num_inserted) {
// ++zero_inserted_count;
// }
// locs_to_count[c_i] = num_inserted;
// count_inserted += num_inserted;
// }
//// {
//// printf("c_i: %u "
//// "num_inserted: %u\n",
//// c_i,
//// num_inserted);
//// }
// }
//// {
//// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) {
//// locs_to_count[c_i] = 0;
//// }
//// printf("iter:%u\n", tmp_count);
//// for (idi c_i = 0; c_i < M; ++c_i) {
//// printf("%u %u\n", c_i, locs_to_count[c_i]);
//// }
//// }
//// {//test
//// idi sum = 0;
//// for (const idi ct : range_count) sum += ct;
//// printf("tmp_count: %u "
//// "k: %u "
//// "actual_M: %u %.1f%% "
//// "zero_ins: %u %.1f%% "
//// "1/3: %u %.1f%% "
//// "2/3: %u %.1f%% "
//// "3/3: %u %.1f%%\n",
//// tmp_count,
//// k,
//// top_m_candidates_end, 100.0 * top_m_candidates_end / M,
//// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end,
//// range_count[0], 100.0 * range_count[0] / sum,
//// range_count[1], 100.0 * range_count[1] / sum,
//// range_count[2], 100.0 * range_count[2] / sum);
//// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {
// printf("query:%uiter: %u "
// "#neighbors: %lu "
// "#inserted: %lu "
// "ratio: %.2f%%\n",
// query_id, tmp_count,
// count_neighbors,
// count_inserted,
// 100.0 * count_inserted / count_neighbors);
// }
//// {// For histogram
////// const auto it_min = std::min_element(set_L.begin(), set_L.end());
////// const auto it_max = std::max_element(set_L.begin(), set_L.end());
////// const distf dist_min = it_min->distance_;
////// const distf dist_max = it_max->distance_;
////// const distf dist_min = it_min->distance_ - 1.0;
////// const distf dist_max = it_max->distance_ + 1.0;
//// const distf dist_range = dist_max_ - dist_min_;
////// const distf dist_range = dist_max - dist_min;
////// {
////// printf("it_min->distance_: %f dist_min: %f\n",
////// it_min->distance_, dist_min);
////// }
////// const distf dist_range = it_max->distance_ - it_min->distance_;
//// printf("iter:%u\n", tmp_count);
//// for (idi i_l = 0; i_l < L; ++i_l) {
////// printf("%f\n", set_L[i_l].distance_);
////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0);
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0);
//// }
//// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
// if (query_id == 3) {
// exit(1);
// }
//}
//
//// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array
//// boost::dynamic_bitset<> is_visited(num_v_); // Bit array
// BitVector is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = true;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
///// Backup
//inline void Searching::search_with_top_m(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
////// DEPRECATED: the is_visited array cannot be shared among threads.
//inline void Searching::search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
// Batched Top-M search: processes batch_size queries (starting at batch_start)
// together so that a candidate selected by several queries has its adjacency
// list walked once per round via a shared "joint queue". Each query keeps its
// own candidate queue (set_L_list), visited bitmap, and k/nk/last_k
// bookkeeping; cands_query_ids maps a joint candidate back to the queries that
// selected it. Results: the best K IDs per query written into set_K_list.
// Note: distance computations are still done per (query, neighbor) pair; only
// the graph traversal bookkeeping is shared.
inline void Searching::search_with_top_m_in_batch(
        const PANNS::idi M,
        const PANNS::idi batch_start,
        const PANNS::idi batch_size,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector< std::vector<Candidate> > &set_L_list,
        const std::vector<idi> &init_ids,
        std::vector< std::vector<idi> > &set_K_list)
{
    // One visited bitmap per query in the batch.
    std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));
    // Prepare the init_ids
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            auto &is_visited = is_visited_list[q_i];
            for (idi c_i = 0; c_i < L; ++c_i) {
                is_visited[init_ids[c_i]] = true;
            }
        }
    }
    // Initialize set_L_list
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
            for (idi i = 0; i < L; i++) {
                idi v_id = init_ids[i];
                // Vertex record layout: precomputed norm first, then the data vector.
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++;
//                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
            }
            std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
        }
    }
    {
        std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
        idi joint_queue_end = 0;
        // Dedup bitmap so a candidate enters the joint queue at most once per round.
        boost::dynamic_bitset<> is_in_joint_queue(num_v_);
//        std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id].
//        std::vector<idi> cands_query_ids_ends(num_v_, 0);
        // Map: candidate ID -> local IDs of the queries that selected it this round.
        // (Constructor argument is the initial bucket count.)
        std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
        std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        // Compact list of queries that still have unchecked candidates.
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;
        idi counter_for_debug = 0;
        while (!is_finished) {
            ++counter_for_debug;
            // Build the new joint queue
            // Traverse every query's queue
            for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
//                last_ks[q_local_id] = L;
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                // Select up to M unchecked candidates from this query's queue.
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    last_ks[q_local_id] = c_i;
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id
                    auto tmp_c = cands_query_ids.find(cand_id);
                    if (tmp_c != cands_query_ids.end()) {
                        tmp_c->second.push_back(q_local_id);
                    } else {
                        cands_query_ids.emplace(cand_id, std::vector<idi>());
                        cands_query_ids[cand_id].reserve(batch_size);
                        cands_query_ids[cand_id].push_back(q_local_id);
                    }
//                    cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
                    // Add candidate cand_id into the joint queue
                    if (is_in_joint_queue[cand_id]) {
                        continue;
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished
            // Traverse every shared candidate
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                // Adjacency list follows the data bytes: first word is the out-degree.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                const auto &query_local_ids = cands_query_ids[cand_id];
                // Push neighbors to every queue of the queries that selected cand_id.
                // Traverse cand_id's neighbors
//                idi &q_i_bound = cands_query_ids_ends[cand_id];
//                for (idi q_i = 0; q_i < q_i_bound; ++q_i) {
//                    idi q_local_id = query_local_ids[q_i];
                for (idi q_local_id : query_local_ids) {
                    dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
                    auto &is_visited = is_visited_list[q_local_id];
                    auto &set_L = set_L_list[q_local_id];
//                    // Traverse cand_id's neighbors
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = true;
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
//                        ++count_distance_computation_;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        // Prune: worse than this query's current L-th best.
                        if (dist > set_L[L-1].distance_) {
                            continue;
                        }
//                        if (dist >= set_L[L-1].distance_) {
//                            continue;
//                        }
                        Candidate new_cand(nb_id, dist, false);
                        idi insert_loc = insert_into_queue(set_L, L, new_cand);
                        if (insert_loc < nks[q_local_id]) {
                            nks[q_local_id] = insert_loc;
                        }
                    }
                }
                // Drop the candidate's query list now that it has been expanded.
                cands_query_ids.erase(cand_id);
//                q_i_bound = 0; // Clear cands_query_ids[cand_id]
            }
            joint_queue_end = 0; // Clear joint_queue
            // Advance each query's scan position and rebuild the not-finished list.
            for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
                if (nks[q_local_id] <= last_ks[q_local_id]) {
                    ks[q_local_id] = nks[q_local_id];
                } else {
                    ks[q_local_id] = last_ks[q_local_id] + 1;
                }
                nks[q_local_id] = L;
                last_ks[q_local_id] = L;
                if (ks[q_local_id] < L) {
                    queries_not_finished[queries_not_finished_end++] = q_local_id;
                }
            }
            if (!queries_not_finished_end) {
                is_finished = true;
            }
        }
    }
    {
        // Emit the best K IDs per query (bounded by L for safety).
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
                set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
            }
        }
    }
////
//    {//test
//        for (idi q_i = 0; q_i < batch_size; ++q_i) {
//            printf("query: %u\n", q_i + batch_start);
//            for (idi c_i = 0; c_i < K; ++c_i) {
//                printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_);
//            }
//        }
//    }
}
//inline void Searching::para_search_with_top_m_critical_area(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_no_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_yes_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// uint64_t count_visited = 0;
//
//// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// ++count_visited;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//// ++count_visited;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
//// {
//// printf("query_id: %u "
//// "count_visited: %lu %f%%\n",
//// query_id,
//// count_visited,
//// 100.0 * count_visited / num_v_);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
//// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// {// test
//// if (query_id == 4 &&
//// tmp_count == 5) {
//// // Print local queues
//// for (int t_i = 0; t_i < num_threads_; ++t_i) {
////// idi start_i = t_i * local_queue_length;
//// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) {
//// printf("t[%u][%u]: "
//// "id: %u "
//// "dist: %f\n",
//// t_i, q_i,
//// local_queues_list[t_i][q_i].id_,
//// local_queues_list[t_i][q_i].distance_);
//// }
//// }
//// printf("----------\n");
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// printf("----------\n");
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_list(
// local_queues_list,
// local_queues_ends,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[0],
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// {//test
//// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("tmp_count: %u "
//// "set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// tmp_count,
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// }
////
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//// {
//// exit(1);
//// }
//// {//test
////
////// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
////// exit(1);
////// }
//// }
//}
//
////// Using local queue and then sequential merge.
//inline void Searching::para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//// for (idi v_i = 0; v_i < L; ++v_i) {
//// idi v_id = init_ids[v_i];
//// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {
//// printf("tmp_count: %u "
//// "k: %u\n",
//// tmp_count,
//// k);
//// }
//
//// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
//// idi r;
////#pragma omp critical
//// {
//// r = insert_into_queue(set_L, L, cand);
//// if (r < nk) {
//// nk = r;
//// }
//// }
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
////// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
//// const idi local_queue_length = L;
//// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
//// std::vector<idi> local_queues_ends(num_threads_, 0);
////// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// // Merge. Merge all queues in parallel.
//// {
//// if (num_threads_ > 1) {
//// idi r = merge_all_queues_para(
//// local_queues_list,
//// local_queues_ends,
//// set_L,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// } else {
//// if (local_queues_ends[0]) {
//// idi r = merge_two_queues_into_1st_queue_seq(
//// set_L,
//// 0,
//// L,
//// local_queues_list[0],
//// 0,
//// local_queues_ends[0]);
//// local_queues_ends[0] = 0;
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// }
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
//inline void Searching::para_search_with_top_m_merge_queues_in_array(
//inline void Searching::para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited)
//// std::vector<uint8_t> &is_visited)
//// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = 1;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// idi min_index = L - 1;
// distf min_1st = set_L[min_index].distance_;
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// const idi local_queue_start = tid * local_queue_length;
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// { // Sequential edition
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//// }
//// { // __ATOMIC_SEQ_CST edition
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//// }
//// {// Acquire and Release edition
//// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) {
//// continue;
//// }
//// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE);
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//
// if (dist > min_1st) {
// continue;
// } else if (min_index > 0) {
// // Inserted, so min_1st needs update
// if (dist > set_L[min_index - 1].distance_) {
// min_1st = dist;
// if (min_index < L - 1) {
// ++min_index;
// }
// } else {
// min_1st = set_L[--min_index].distance_;
// }
//// min_1st = set_L[--min_index].distance_;
// }
//
//// if (dist > set_L[L-1].distance_) {
//// continue;
//// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
//// local_queues_list,
// local_queues_array,
// local_queues_ends,
// local_queue_length,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
//// local_queues_list[0],
// local_queues_array,
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// // Merge Sequentially
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_seq_fixed(
//// set_L,
//// 0,
//// L,
////// local_queues_list[tid],
////// 0,
//// local_queues_array,
//// tid * local_queue_length,
//// local_queues_ends[tid]);
////// L + 1);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
/*
 * 5/7/2020-15:14
 * Use 1 thread to scale M up until it reaches value_M_middle.
 * Then use multiple threads.
 */
/*
 * Top-M best-first search over opt_nsg_graph_ for one query, in two phases:
 * a single-threaded warm-up while M < value_M_middle, then an OpenMP-parallel
 * expansion phase. M (candidates expanded per iteration) doubles every
 * iteration, capped at value_M_max.
 *
 * Queue layout inside set_L:
 *   - each non-master thread t (t = 1..num_threads_-1) pushes into a private
 *     slice of capacity local_queue_length starting at (t-1)*local_queue_length;
 *   - the "global" queue of capacity L lives at offset base_set_L
 *     (= (num_threads_-1) * local_queue_length) and is maintained by thread 0;
 *     its current size is local_queues_ends[num_threads_-1].
 * After each parallel iteration, all local queues are merged back into the
 * global queue via merge_all_queues_para_array().
 *
 * Outputs: set_K[0..K) receives the ids of the K best candidates.
 * Side effects: is_visited and local_queues_ends are cleared before return;
 * count_distance_computation_ and time_merge_ are updated.
 *
 * NOTE(review): in the parallel expansion loop is_visited is read and written
 * by all threads with no atomics; boost::dynamic_bitset is not thread-safe.
 * The apparent intent is to tolerate the race (worst case: a neighbor is
 * expanded twice) — confirm this is deliberate.
 */
inline void Searching::para_search_with_top_m_merge_queues_middle_m(
        const idi value_M_middle,             // M threshold: switch to the parallel phase once M reaches this
        const idi value_M_max,                // upper bound on M
        const idi query_id,
        const idi K,                          // number of results to report
        const idi L,                          // global queue capacity (search width)
        std::vector<Candidate> &set_L,        // backing array for all per-thread queues + global queue
        const std::vector<idi> &init_ids,     // L seed vertex ids
        std::vector<idi> &set_K,              // output: ids of the K best candidates
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,   // scratch: ids of candidates selected for expansion
        boost::dynamic_bitset<> &is_visited)  // visited flags, one bit per vertex
{
//    time_initialization_ -= WallTimer::get_time_mark();
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    // Mark all seeds visited (sequential; the parallel version is kept commented out).
//#pragma omp parallel for
    for (idi c_i = 0; c_i < L; ++c_i) {
        is_visited[init_ids[c_i]] = 1;
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with each seed's packed vertex record.
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
    // Each vertex record starts with its precomputed norm, followed by its data.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    tmp_count_computation = 0;
    // Sort only the global-queue slice (ascending by distance).
//    std::sort(set_L.begin(), set_L.begin() + L);
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + L);
    // The global queue (last slot of local_queues_ends) now holds the L sorted seeds.
    local_queues_ends[num_threads_ - 1] = L;
//    time_initialization_ += WallTimer::get_time_mark();

    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1;

//    time_sequential_phase_ -= WallTimer::get_time_mark();
    { // Single thread
        // Phase 1: expand candidates one batch at a time without threads,
        // until M grows to value_M_middle (or the search converges).
        while (k < L && M < value_M_middle) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
            // Select M candidates
            idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }

            idi nk = L; // lowest global-queue position any new candidate landed at
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency list is stored after the data: [out_degree, neighbor ids...].
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Skip neighbors that cannot enter the (full) global queue.
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;

            // Resume scanning from the earliest insertion point, or just past
            // the last candidate expanded if nothing improved earlier entries.
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_sequential_phase_ += WallTimer::get_time_mark();

//    time_parallel_phase_ -= WallTimer::get_time_mark();
//    uint64_t tmp_count_add_to_queue = 0;
//    double tmp_time_pick_top_m = 0;
//    double tmp_time_distance_computation = 0;
//    double tmp_time_add_to_queue = 0.0;
    { // Multiple Threads
        // Phase 2: same loop, but neighbor expansion is parallelized and each
        // thread pushes into its own queue; queues are merged afterwards.
        while (k < L) {
//            time_expand_ -= WallTimer::get_time_mark();
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
            // Select M candidates
            idi last_k = L;
//            time_pick_top_m_ -= WallTimer::get_time_mark();
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
//            time_pick_top_m_ += WallTimer::get_time_mark();

            idi nk = L;
            // Push M candidates' neighbors into the queue.
#pragma omp parallel for reduction(+ : tmp_count_computation)
//                    reduction(+ : tmp_count_add_to_queue) \
//                    reduction(+ : tmp_time_pick_top_m) \
//                    reduction(+ : tmp_time_distance_computation) \
//                    reduction(+ : tmp_time_add_to_queue)
//        for (int tid = 0; tid < num_threads_; ++tid) {
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
//                tmp_time_pick_top_m -= WallTimer::get_time_mark();
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
//                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
//                for (idi n_i = 0; n_i < out_degree; ++n_i) {
//                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//                }
//                tmp_time_pick_top_m += WallTimer::get_time_mark();
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
//                    tmp_time_distance_computation -= WallTimer::get_time_mark();
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        // NOTE(review): unsynchronized bitset access inside a
                        // parallel region (see function header note).
                        if (is_visited[nb_id]) {
//                            tmp_time_distance_computation += WallTimer::get_time_mark();
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//                    tmp_time_distance_computation += WallTimer::get_time_mark();
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
//                    ++tmp_count_add_to_queue;
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
//                    tmp_time_pick_top_m -= WallTimer::get_time_mark();
//                    tmp_time_add_to_queue -= WallTimer::get_time_mark();
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
//                    tmp_time_add_to_queue += WallTimer::get_time_mark();
//                    tmp_time_pick_top_m += WallTimer::get_time_mark();
                }
            }
//            time_add_to_queue_ += tmp_time_add_to_queue;
//            tmp_time_add_to_queue = 0;
//        }
//            time_distance_computation_ += tmp_time_distance_computation;
//            tmp_time_distance_computation = 0;
//            time_pick_top_m_ += tmp_time_pick_top_m;
//            tmp_time_pick_top_m = 0;
//            count_add_to_queue_ += tmp_count_add_to_queue;
//            tmp_count_add_to_queue = 0;
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
//            time_expand_ += WallTimer::get_time_mark();
//        // Merge. Merge all queues in parallel.
            {
                // Fold every thread's local queue back into the global queue.
                time_merge_ -= WallTimer::get_time_mark();
                if (num_threads_ > 1) {
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
                time_merge_ += WallTimer::get_time_mark();
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_parallel_phase_ += WallTimer::get_time_mark();

//    time_ending_ -= WallTimer::get_time_mark();
    // Report the K best ids from the front of the global queue.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }

    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
//    time_ending_ += WallTimer::get_time_mark();
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
}
/*
 * Variant of para_search_with_top_m_merge_queues_middle_m with a hard budget
 * on distance computations: both phases stop once count_single_query_computation
 * exceeds computation_threshold. It also takes init_size separately from L, so
 * the queue can be seeded with fewer than L candidates; the global queue grows
 * from init_size toward L as the search proceeds (global_queue_size aliases
 * local_queues_ends[num_threads_ - 1]).
 *
 * Queue layout is the same as the base version: non-master thread t uses the
 * slice starting at (t-1)*local_queue_length, the global queue of capacity L
 * starts at base_set_L and is maintained by thread 0.
 *
 * Outputs: set_K[0..K) receives the ids of the K best candidates.
 * Side effects: is_visited and local_queues_ends are cleared before return;
 * count_distance_computation_ is updated.
 *
 * NOTE(review): despite the "_no_merge" name, the parallel phase still calls
 * merge_all_queues_para_array after every iteration; the distinguishing
 * feature of this variant appears to be the computation budget, not the
 * absence of merging — confirm the intended naming.
 * NOTE(review): as in the base version, is_visited is accessed without
 * synchronization inside the parallel region (benign-looking data race).
 */
inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge(
        const uint64_t computation_threshold, // budget: max distance computations for this query
        const idi value_M_middle,             // M threshold: switch to the parallel phase once M reaches this
        const idi value_M_max,                // upper bound on M
        const idi query_id,
        const idi K,                          // number of results to report
        const idi L,                          // global queue capacity (search width)
        const idi init_size,                  // number of seeds actually used (<= L)
        std::vector<Candidate> &set_L,        // backing array for all per-thread queues + global queue
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,              // output: ids of the K best candidates
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,   // scratch: ids of candidates selected for expansion
        boost::dynamic_bitset<> &is_visited)  // visited flags, one bit per vertex
{
    // Per-phase computation counters; only count_single_query_computation is
    // compared against the budget, the others feed the (commented) debug dump.
    uint64_t count_single_query_computation = 0;
    uint64_t count_init_computation = 0;
    uint64_t count_seq_computation = 0;
    uint64_t count_par_computation = 0;
//    {//test
//        printf("query_id: %u\n", query_id);
//    }
//    time_initialization_ -= WallTimer::get_time_mark();
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    {
        // Mark the init_size seeds as visited.
#pragma omp parallel for
        for (idi c_i = 0; c_i < init_size; ++c_i) {
//        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
//            is_visited.atomic_set_bit(init_ids[c_i]);
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with each seed's packed vertex record.
#pragma omp parallel for
    for (idi v_i = 0; v_i < init_size; ++v_i) {
//    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
    // Each vertex record starts with its precomputed norm, followed by its data.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < init_size; i++) {
//    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation_;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    count_init_computation += tmp_count_computation;
    count_single_query_computation += tmp_count_computation;
    tmp_count_computation = 0;
    // Sort only the seeded prefix of the global-queue slice.
//    std::sort(set_L.begin(), set_L.begin() + L);
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + init_size);
//            set_L.begin() + base_set_L + L);
    local_queues_ends[num_threads_ - 1] = init_size;
//    local_queues_ends[num_threads_ - 1] = L;
//    time_initialization_ += WallTimer::get_time_mark();

//    time_sequential_phase_ -= WallTimer::get_time_mark();
//    std::vector<idi> top_m_candidates(M);
    // Alias for the global queue's current size; it starts at init_size and
    // is grown in place by add_into_queue (capacity L).
    idi &global_queue_size = local_queues_ends[num_threads_ - 1];
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1;

    { // Single thread
        // Phase 1: sequential expansion until M reaches value_M_middle,
        // convergence, or the computation budget is exhausted.
        while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }

            idi nk = L; // lowest global-queue position any new candidate landed at
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency list is stored after the data: [out_degree, neighbor ids...].
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Compare against the current tail of the (possibly not yet
                    // full) global queue, not position L-1.
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            global_queue_size,
//                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_seq_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//            {// Local queues' ends
//                printf("query%u:iter: %u", query_id, tmp_count);
//                for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                    printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//                }
//                printf("\n");
//            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_sequential_phase_ += WallTimer::get_time_mark();

//    time_parallel_phase_ -= WallTimer::get_time_mark();
    { // Multiple Threads
        // Phase 2: parallel expansion, still bounded by the computation budget.
        while (k < L and count_single_query_computation <= computation_threshold) {
//        while (k < L) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d "
//                       "k: %u "
//                       "global_queue_size: %u\n",
//                        tmp_count,
//                        k,
//                        global_queue_size);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }

            idi nk = L;
            // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
#pragma omp parallel for reduction(+ : tmp_count_computation)
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        // NOTE(review): unsynchronized bitset access inside a
                        // parallel region (see function header note).
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                global_queue_size,
//                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_par_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//            {// Local queues' ends
//                printf("query%u:iter: %u", query_id, tmp_count);
//                for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                    printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//                }
//                printf("\n");
//            }
            // Merge. Merge all queues in parallel.
            {
                if (num_threads_ > 1) {
//                    idi r = merge_all_queues_queue_base(
//                            set_L,
//                            local_queues_ends,
//                            queue_base,
//                            real_threads,
//                            local_queue_length,
//                            L);
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
//            {// Print relative distance
////                distf top_dist = set_L[base_set_L].distance_;
//                for (idi i_l = 0; i_l < L; ++i_l) {
//                    printf("%u %f\n",
//                            tmp_count, set_L[i_l + base_set_L].distance_);
////                            tmp_count, set_L[i_l + base_set_L].distance_ - top_dist);
//                }
//            }
        }
    }
//    time_parallel_phase_ += WallTimer::get_time_mark();

    // Report the K best ids from the front of the global queue.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }

    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
//    {//test
//        printf("count_single: %lu "
//               "ct_init: %lu "
//               "ct_seq: %lu "
//               "ct_par: %lu\n",
//                count_single_query_computation,
//                count_init_computation,
//                count_seq_computation,
//                count_par_computation);
//    }
}
///*
// * 6/15/2020-14:40
// * Queues merging together to the global queue
// */
//inline void Searching::para_search_with_top_m_merge_queues_sequential_merge(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
//{
//// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// if (num_threads_ == 2) {
//// printf("tmp_count: %d "
//// "k: %u\n",
//// tmp_count,
//// k);
//// }
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
//// {//test
//// for (idi q_i = 0; q_i < num_threads_; ++q_i) {
//// if (0 == local_queues_ends[q_i]) {
//// continue;
//// }
//// for (idi e_i = 0; e_i < local_queues_ends[q_i]; ++e_i) {
//// printf("tmp_count: %u "
//// "q_i: %u "
//// "[%u]: (%u, %f)\n",
//// tmp_count,
//// q_i,
//// e_i, set_L[q_i * local_queue_length + e_i].id_, set_L[q_i * local_queue_length + e_i].distance_);
//// }
//// }
//// }
//// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
// idi r = merge_all_queues_all_together_in_sequential(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
//// idi r = merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
// if (r < nk) {
// nk = r;
// }
//// {//test
//// printf("tmp_count: %u "
//// "r: %u "
//// "last_k: %u\n",
//// tmp_count,
//// r,
//// last_k);
//// for (idi l_i = 0; l_i < L; ++l_i) {
//// printf("tmp_count: %u "
//// "[%u]: (%u, %f)\n",
//// tmp_count,
//// l_i, set_L[l_i + base_set_L].id_, set_L[l_i + base_set_L].distance_);
//// }
//// }
// }
//
//// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// if (0 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/19/2020:
// * Intra-query + Inter-query
// */
//inline void Searching::para_search_with_top_m_nested_para(
// const idi batch_start,
// const idi batch_size,
// const idi value_M_middle,
// const idi value_M_max,
// const idi K,
// const idi L,
// std::vector< std::vector<Candidate> > &set_L_list,
// const std::vector<idi> &init_ids,
// std::vector< std::vector<idi> > &set_K_list,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_intra_query_ - 1) * local_queue_length;
// std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
// std::vector< std::vector<idi> > &top_m_candidates_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list)
//{
// {// Initialize is_visited flag array
//#pragma omp parallel for num_threads(num_threads_inter_query_)
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// auto &is_visited = is_visited_list[q_i];
//#pragma omp parallel for num_threads(num_threads_intra_query_)
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
// }
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// uint64_t tmp_count_total_computation = 0;
//#pragma omp parallel for num_threads(num_threads_inter_query_) reduction(+ : tmp_count_total_computation)
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// idi query_id = batch_start + q_i;
// auto &set_L = set_L_list[q_i];
// auto &local_queues_ends = local_queues_ends_list[q_i];
// auto &is_visited = is_visited_list[q_i];
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
////#pragma omp parallel for
//// for (idi v_i = 0; v_i < L; ++v_i) {
//// idi v_id = init_ids[v_i];
//// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_intra_query_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// auto &top_m_candidates = top_m_candidates_list[q_i];
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//// {//test
//// if (391655 == nb_id) {
//// printf("tmp_count: %u "
//// "nb_id: %u "
//// "distf: %f\n",
//// tmp_count,
//// nb_id,
//// dist);
//// }
//// }
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_intra_query_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//// {//test
//// if (391655 == nb_id) {
//// printf("tmp_count: %u "
//// "nb_id: %u "
//// "distf: %f\n",
//// tmp_count,
//// nb_id,
//// dist);
//// }
//// }
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_intra_query_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
//// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_intra_query_ > 1) {
// idi r = merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
// if (r < nk) {
// nk = r;
// }
// }
//// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
// count_distance_computation_ += tmp_count_total_computation;
// tmp_count_total_computation = 0;
//
// auto &set_K = set_K_list[query_id];
//
//#pragma omp parallel for num_threads(num_threads_intra_query_)
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
// }
//
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//// {
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: (%u %f)\n",
//// k_i, set_L_list[0][k_i].id_, set_L_list[0][k_i].distance_);
//// }
//// if (0 == batch_start) {
//// exit(1);
//// }
//// }
//}
/*
* 6/22/2020-21:30
* Do searching on the local_set_L
* local_set_L is already sorted
* is_visited is already set up.
*/
inline void Searching::subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation)
{
const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi iter = 0;
idi M = 1; // value of M
while (k < local_L) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
local_L,
set_L,
set_L_start,
set_L_size,
local_top_m_candidates,
is_visited,
local_count_distance_computation);
{// Scale M
if (M < value_M_max) {
M <<= 1;
} else {
M = value_M_max;
}
}
}
// {//test
// printf("set_L_start: %u "
// "local_count_distance_computation: %lu\n",
// set_L_start,
// local_count_distance_computation);
// }
}
//// Backup
//inline void Searching::subsearch_with_top_m(
// const idi value_M_max,
// const idi query_id,
// const idi local_L,
// std::vector<Candidate> &set_L,
// const idi base_set_L,
// idi &set_L_end,
// std::vector<idi> &local_top_m_candidates,
// boost::dynamic_bitset<> &is_visited,
// uint64_t &local_count_distance_computation)
//{
// const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi iter = 0;
// idi M = 1; // value of M
//
// while (k < local_L) {
// ++iter;
// // Select M candidates
// idi last_k = local_L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < set_L_end && local_top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = local_L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < local_top_m_candidates_end; ++c_i) {
// idi cand_id = local_top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++local_count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[set_L_end - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// set_L_end,
// local_L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// local_top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
//}
/*
* 7/6/2020-23:17
* Subsearch only 1 iteration using top-m
*/
inline void Searching::subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation)
{
// Select M candidates
idi top_m_candidates_end = 0;
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
idi index_set_L = c_i + set_L_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// {//test
// M_ids_.push_back(set_L[index_set_L].id_);
// }
}
idi nk = L;
// Push M candidates' neighbors into the queue.
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++count_distance_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
if (dist > set_L[set_L_size - 1 + set_L_start].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
idi r = add_into_queue(
set_L,
set_L_start,
set_L_size,
L,
cand);
if (r < nk) {
nk = r;
}
}
}
// top_m_candidates_end = 0; // Clear top_m_candidates
if (nk <= last_k) {
k_uc = nk;
} else {
k_uc = last_k + 1;
}
// {//test
// for (idi l_i = 0; l_i < set_L_size; ++l_i) {
// L_ids_.push_back(set_L[set_L_start + l_i].id_);
// }
// std::sort(L_ids_.begin(), L_ids_.end());
// std::sort(M_ids_.begin(), M_ids_.end());
// for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) {
// printf("query_id: %u "
// "iter: %u "
// "M[%u]: "
// "%u\n",
// query_id,
// iter,
// m_i,
// M_ids_[m_i]);
// }
// M_ids_.clear();
// for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) {
// printf("query_id: %u "
// "iter: %u "
// "L[%u]: "
// "%u\n",
// query_id,
// iter,
// l_i,
// L_ids_[l_i]);
// }
// L_ids_.clear();
// }
}
///*
// * One more parameter for distance bound
// */
//inline void Searching::subsearch_top_m_for_one_iteration_lth(
// const distf bound_lth,
// const idi iter,
// idi &k_uc,
// const idi value_M,
// const idi query_id,
// const dataf *query_data,
// const idi L,
// std::vector<Candidate> &set_L,
// const idi set_L_start,
// idi &set_L_size,
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited,
// uint64_t &count_distance_computation)
//{
// // Select M candidates
// idi top_m_candidates_end = 0;
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
// idi index_set_L = c_i + set_L_start;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > bound_lth) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// idi r = add_into_queue(
// set_L,
// set_L_start,
// set_L_size,
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k_uc = nk;
// } else {
// k_uc = last_k + 1;
// }
//}
/*
* 7/24/2020-10:53
* Subsearch for one iteration, with the global L-th value as the bound,
* and the top_m_position indicates the bound for local top-M vertices.
*/
/*
 * 7/24/2020-10:53
 * Subsearch for one iteration, with the global L-th value as the bound,
 * and the top_m_position indicates the bound for local top-M vertices.
 *
 * Instrumented variant of subsearch_top_m_for_one_iteration(): identical
 * select-then-expand structure, but with wall-clock accumulators
 * (time_pick_top_m, time_distance_computation, time_add_to_queue) and an
 * insertion counter (count_add_to_queue) threaded through by reference.
 * NOTE(review): bound_lth is currently unused — the `dist > bound_lth` check
 * is commented out below in favor of the queue's own tail distance; confirm
 * whether the global bound is still intended.
 */
inline void Searching::subsearch_top_m_for_one_iteration_lth_mth(
        const distf bound_lth,
//        const idi top_m_position,
        const idi iter,
        idi &k_uc,
        const idi local_m_count,
        const idi query_id,
        const dataf *query_data,
        const idi L,
        std::vector<Candidate> &set_L,
        const idi set_L_start,
        idi &set_L_size,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &count_distance_computation,
        double &time_pick_top_m,
        uint64_t &count_add_to_queue,
        double &time_distance_computation,
        double &time_add_to_queue)
{
//    {//test
//        printf("query_id: %u "
//               "iter: %u "
//               "tid: %u \n",
//               query_id,
//               iter,
//               omp_get_thread_num());
//    }
    // Select up to local_m_count unchecked candidates, scanning from k_uc.
    idi top_m_candidates_end = 0;
    idi last_k = L;  // position of the last candidate selected; L == none selected
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
//    for (idi c_i = k_uc; c_i < top_m_position; ++c_i) {
    time_pick_top_m -= WallTimer::get_time_mark();
    for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < local_m_count; ++c_i) {
        idi index_set_L = c_i + set_L_start;
        if (set_L[index_set_L].is_checked_) {
            continue;
        }
        last_k = c_i; // Record the location of the last candidate selected.
        set_L[index_set_L].is_checked_ = true;
        top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
//        {//test
//            M_ids_.push_back(set_L[index_set_L].id_);
//        }
    }
    time_pick_top_m += WallTimer::get_time_mark();
    idi nk = L;  // lowest insertion position produced this iteration
    // Push M candidates' neighbors into the queue.
    for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
        // Edge-list lookup is billed to the pick-top-m timer here.
        time_pick_top_m -= WallTimer::get_time_mark();
        idi cand_id = top_m_candidates[c_i];
//        _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
        idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
        idi out_degree = *out_edges++;  // degree is stored ahead of the edge list
//        for (idi n_i = 0; n_i < out_degree; ++n_i) {
//            _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//        }
        time_pick_top_m += WallTimer::get_time_mark();
        for (idi e_i = 0; e_i < out_degree; ++e_i) {
            time_distance_computation -= WallTimer::get_time_mark();
            idi nb_id = out_edges[e_i];
            { // Sequential edition
                if (is_visited[nb_id]) {
                    // Close the timer interval before skipping this neighbor.
                    time_distance_computation += WallTimer::get_time_mark();
                    continue;
                }
                is_visited[nb_id] = 1;
            }
            auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
            dataf norm = *nb_data++;  // stored norm precedes the vector data
            ++count_distance_computation;
            distf dist = compute_distance_with_norm(nb_data, query_data, norm);
            time_distance_computation += WallTimer::get_time_mark();
            // Prune against the queue's current worst element (not bound_lth).
            if (dist > set_L[set_L_start + set_L_size - 1].distance_) {
//            if (dist > bound_lth) {
                continue;
            }
            ++count_add_to_queue;
            Candidate cand(nb_id, dist, false);
//            time_pick_top_m -= WallTimer::get_time_mark();
            time_add_to_queue -= WallTimer::get_time_mark();
            idi r = add_into_queue(
                    set_L,
                    set_L_start,
                    set_L_size,
                    L,
                    cand);
            if (r < nk) {
                nk = r;
            }
            time_add_to_queue += WallTimer::get_time_mark();
//            time_pick_top_m += WallTimer::get_time_mark();
        }
    }
    // Advance the unchecked pointer: resume at the lowest insertion position
    // if it landed at or before the last selected candidate, else just past it.
    if (nk <= last_k) {
        k_uc = nk;
    } else {
        k_uc = last_k + 1;
    }
}
///*
// * 7/26/2020-15:41
// * L-th and M-th Selection.
// * Seq-Par Phases: when M is 1 and 2, do sequential searching;
// * When M is equal and larger than 4, do parallel searching.
// * It's for load-balance issue.
// */
//inline void Searching::para_search_with_top_m_subsearch_v3(
// const idi local_M_max,
// const idi local_M_middle,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
//// const idi total_L,
//// const idi init_queue_size,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_starts,
// std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited)
//{
// time_initialization_ -= WallTimer::get_time_mark();
// uint64_t tmp_count_computation = 0;
// {// Initialization
// // is_visited flag array
////#pragma omp parallel for
//// Cannot use OMP for bit array is_visited!
// for (idi c_i = 0; c_i < global_L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < global_L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi id_i = 0; id_i < global_L; ++id_i) {
// idi v_id = init_ids[id_i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[id_i] = Candidate(v_id, dist, false); // False means not checked.
// }
// local_queues_sizes[0] = global_L;
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// std::sort(set_L.begin(), set_L.begin() + global_L);
// }
// time_initialization_ += WallTimer::get_time_mark();
//
// // Searching
// if (num_threads_ == 1) { // Single threads
//// std::sort(
//// set_L.begin(),
//// set_L.end());
// subsearch_with_top_m(
// local_M_max,
// query_id,
// local_L,
// set_L,
// 0,
// local_queues_sizes[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// } else { // Multiple threads
// const dataf *query_data = queries_load_ + query_id * dimension_;
// const idi num_queues = num_threads_;
// idi local_M = 1;
// idi iter = 0;
// std::vector<idi> ks(num_queues, 0);
//
// time_sequential_phase_ -= WallTimer::get_time_mark();
// {// Sequential Search for M = 1, 2.
// idi &k = ks[0];
// while (k < global_L && local_M < local_M_middle) {
// ++iter;
// subsearch_top_m_for_one_iteration(
// iter,
// k,
// local_M,
// query_id,
// query_data,
// global_L,
// set_L,
// 0,
// local_queues_sizes[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// {// Double M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
// }
// }
// }
// time_sequential_phase_ += WallTimer::get_time_mark();
//
// time_parallel_phase_ -= WallTimer::get_time_mark();
// distf bound_lth = set_L[global_L - 1].distance_;
// {// Parallel Search for M >= 4, or local_M_middle
// time_assign_s_ -=WallTimer::get_time_mark();
// {// Assign elements from Queue[0] to others
// idi dst_i = 1;
// for (idi e_i = 1; e_i < global_L; ++e_i) {
// idi dest_sub = e_i % num_queues;
// if (0 == dest_sub) {
// set_L[dst_i++] = set_L[e_i];
// } else {
// set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i];
// }
// }
// local_queues_sizes[0] = dst_i;
// }
// std::fill(ks.begin(), ks.end(), 0);
//
//
// selecting_unchecked_top_M_seq(
// query_id,
// iter,
// set_L,
// ks,
// local_M,
// num_queues,
// local_queues_starts,
// local_queues_sizes,
// local_m_counts);
// time_assign_s_ +=WallTimer::get_time_mark();
//
// double tmp_time_pick_top_m = 0;
// uint64_t tmp_count_add_to_queue = 0;
// uint8_t not_finished = 1;
// double tmp_time_distance_computation = 0;
// double tmp_time_add_to_queue = 0;
// while (true) {
// time_expand_ -= WallTimer::get_time_mark();
// not_finished = 0;
// ++iter;
//#pragma omp parallel for reduction(+ : tmp_count_computation) \
// reduction(+ : tmp_time_pick_top_m) \
// reduction(+ : tmp_count_add_to_queue) \
// reduction(+ : tmp_time_distance_computation) \
// reduction(+ : tmp_time_add_to_queue)
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// tmp_time_pick_top_m -= WallTimer::get_time_mark();
// idi L_value = q_i == 0 ? global_L : local_L;
// idi &k = ks[q_i];
// idi &local_queue_size = local_queues_sizes[q_i];
// auto &local_top_m_candidates = top_m_candidates_list[q_i];
// idi local_m_count = local_m_counts[q_i];
//// if (local_M < num_queues && !local_m_count) {
//// local_m_count = 1;
//// }
// tmp_time_pick_top_m += WallTimer::get_time_mark();
// if (!local_m_count) {
// continue;
// }
// not_finished = 1;
// const idi local_queue_start = local_queues_starts[q_i];
//
// subsearch_top_m_for_one_iteration_lth_mth(
// bound_lth,
// iter,
// k,
// local_m_count,
// query_id,
// query_data,
// L_value,
// set_L,
// local_queue_start,
// local_queue_size,
// local_top_m_candidates,
// is_visited,
// tmp_count_computation,
// tmp_time_pick_top_m,
// tmp_count_add_to_queue,
// tmp_time_distance_computation,
// tmp_time_add_to_queue);
// }
// time_add_to_queue_ += tmp_time_add_to_queue;
// tmp_time_add_to_queue = 0;
// time_distance_computation_ += tmp_time_distance_computation;
// tmp_time_distance_computation = 0;
// count_add_to_queue_ += tmp_count_add_to_queue;
// tmp_count_add_to_queue = 0;
// time_pick_top_m_ += tmp_time_pick_top_m;
// tmp_time_pick_top_m = 0;
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// time_expand_ += WallTimer::get_time_mark();
// if (!not_finished) {
// break;
// }
// {// Scale M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
//// else {
//// local_M = value_M_max;
//// }
// }
// time_select_ -= WallTimer::get_time_mark();
//#pragma omp parallel sections
// {
//#pragma omp section
// {// Setecting and update local_queues_lengths
//// time_select_L_ -= WallTimer::get_time_mark();
// bound_lth = selecting_top_L_seq(
// set_L,
// global_L,
//// local_L,
// num_queues,
// local_queues_starts,
// local_queues_sizes);
//// time_select_L_ += WallTimer::get_time_mark();
// }
//#pragma omp section
// {
//// time_select_M_ -= WallTimer::get_time_mark();
// selecting_unchecked_top_M_seq(
// query_id,
// iter,
// set_L,
// ks,
// local_M,
// num_queues,
// local_queues_starts,
// local_queues_sizes,
// local_m_counts);
//// time_select_M_ += WallTimer::get_time_mark();
// }
// }
// time_select_ += WallTimer::get_time_mark();
//// {//test
//// printf("query_id: %u "
//// "iter: %u",
//// query_id,
//// iter);
//// printf(" local_queues_sizes:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_queues_sizes[i]);
//// }
//// printf(" local_m_counts:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_m_counts[i]);
//// }
//// printf(" ks:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", ks[i]);
//// }
//// printf("\n");
//// }
// }
// }
// time_parallel_phase_ += WallTimer::get_time_mark();
// }
//
//// time_merge_ -= WallTimer::get_time_mark();
// time_ending_ -= WallTimer::get_time_mark();
// {// Return the results to set_K
// std::vector<idi> pointer(num_threads_, 0);
// // get the first
// distf min_dist = FLT_MAX;
// idi min_q_i;
// idi min_id;
// idi min_sub;
// idi last_id;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// if (pointer[q_i] >= local_queues_sizes[q_i]) {
// continue;
// }
// idi sub = pointer[q_i] + local_queues_starts[q_i];
// distf tmp_dist = set_L[sub].distance_;
// idi tmp_id = set_L[sub].id_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// } else if (tmp_dist == min_dist && tmp_id < min_id) {
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[0] = set_L[min_sub].id_;
//// {//test
//// printf("query_id: %u "
//// "[%u]: "
//// "(%u, %f)\n",
//// query_id,
//// 0,
//// set_L[min_sub].id_, set_L[min_sub].distance_);
//// }
// ++pointer[min_q_i];
// last_id = set_K[0];
//
// bool is_finished = false;
// idi k_i = 1;
// while (k_i < K && !is_finished) {
// is_finished = true;
// min_dist = FLT_MAX;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// const idi local_queue_size = local_queues_sizes[q_i];
// idi sub = pointer[q_i] + local_queues_starts[q_i];
//
// while (pointer[q_i] < local_queue_size
// && set_L[sub].id_ == last_id) {
// ++pointer[q_i];
// ++sub;
// }
// if (pointer[q_i] >= local_queue_size) {
// continue;
// }
// is_finished = false;
// distf tmp_dist = set_L[sub].distance_;
// idi tmp_id = set_L[sub].id_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// } else if (tmp_dist == min_dist && tmp_id < min_id) {
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[k_i] = set_L[min_sub].id_;
//// {//test
//// printf("query_id: %u "
//// "[%u]: "
//// "(%u, %f)\n",
//// query_id,
//// k_i,
//// set_L[min_sub].id_, set_L[min_sub].distance_);
//// }
// ++pointer[min_q_i];
// ++k_i;
// }
// }
//// time_merge_ += WallTimer::get_time_mark();
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
// std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0);
// }
//
// time_ending_ += WallTimer::get_time_mark();
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
/*
 * 7/27/2020-15:33
 * Same with v3, but gather top-m vertices together.
 *
 * Top-M parallel best-first search for one query.
 * Phase 1: sequential search on the single global queue, doubling M until
 *          local_M_middle.
 * Phase 2: parallel search — the global queue is scattered round-robin into
 *          one queue per thread; each iteration gathers the top-M unchecked
 *          candidates over all queues, expands them in parallel (one
 *          candidate per thread), then re-selects the global L-th distance
 *          as the pruning bound.
 * Finally the sorted per-thread queues are K-way merged into set_K,
 * skipping duplicate ids, and the per-query state is reset.
 *
 * Fixes vs. previous version:
 *  - min_q_i/min_id/min_sub are initialized (previously read uninitialized
 *    in the first tie-break comparison, and UB if all queues were empty);
 *  - the merge loop breaks when every queue is exhausted instead of writing
 *    a stale candidate into set_K;
 *  - last_id is updated after each emitted result, so duplicates of ANY
 *    previously emitted id are skipped (before, only duplicates of set_K[0]
 *    were filtered).
 */
inline void Searching::para_search_with_top_m_subsearch_v4(
        const idi local_M_max,      // upper bound for the top-M width
        const idi local_M_middle,   // M threshold to switch sequential -> parallel
        const idi query_id,
        const idi K,                // number of results returned in set_K
        const idi global_L,         // capacity of the master queue (thread 0)
        const idi local_L,          // capacity of each non-master local queue
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const std::vector<idi> &local_queues_starts,
        std::vector<idi> &local_queues_sizes,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // Mark the initial candidates visited.
        // Cannot use OMP here: is_visited is a bit array, concurrent bit
        // writes to the same word would race.
        for (idi c_i = 0; c_i < global_L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }

        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < global_L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (idi id_i = 0; id_i < global_L; ++id_i) {
            idi v_id = init_ids[id_i];
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[id_i] = Candidate(v_id, dist, false); // False means not checked.
        }
        local_queues_sizes[0] = global_L;
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
        std::sort(set_L.begin(), set_L.begin() + global_L);
    }

    // Searching
    if (num_threads_ == 1) { // Single thread: plain top-M subsearch on one queue.
        subsearch_with_top_m(
                local_M_max,
                query_id,
                global_L,
                set_L,
                0,
                local_queues_sizes[0],
                top_m_candidates,
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
    } else { // Multiple threads
        const dataf *query_data = queries_load_ + query_id * dimension_;
        const idi num_queues = num_threads_;
        idi local_M = 1;
        idi iter = 0;
        {// Sequential Search for M = 1, 2, ... up to local_M_middle.
            idi k = 0;
            while (k < global_L && local_M < local_M_middle) {
                ++iter;
                subsearch_top_m_for_one_iteration(
                        iter,
                        k,
                        local_M,
                        query_id,
                        query_data,
                        global_L,
                        set_L,
                        0,
                        local_queues_sizes[0],
                        top_m_candidates,
                        is_visited,
                        tmp_count_computation);
                count_distance_computation_ += tmp_count_computation;
                tmp_count_computation = 0;
                {// Double M
                    if (local_M < local_M_max) {
                        local_M <<= 1;
                    }
                }
            }
        }
        // Distance of the current global L-th candidate: pruning bound for
        // all insertions during the parallel phase.
        distf bound_lth = set_L[global_L - 1].distance_;
        {// Parallel Search for M >= 4, or local_M_middle
            {// Assign elements from Queue[0] to others, round-robin by index.
                idi dst_i = 1;
                for (idi e_i = 1; e_i < global_L; ++e_i) {
                    idi dest_sub = e_i % num_queues;
                    if (0 == dest_sub) {
                        set_L[dst_i++] = set_L[e_i];
                    } else {
                        set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i];
                    }
                }
                local_queues_sizes[0] = dst_i;
            }
            idi top_m_candidates_size = 0;
            std::vector<idi> ks(num_queues, 0);    // first-unchecked index per queue
            std::vector<idi> nks(num_queues);      // lowest insertion position per queue this iteration
            std::vector<idi> bound_ks(num_queues); // per-queue scan bounds from the gather step
            while (true) {
                ++iter;
                // Gather the top-M unchecked vertices across all queues.
                gather_unchecked_top_M_seq(
                        query_id,
                        iter,
                        set_L,
                        ks,
                        local_M,
                        num_queues,
                        local_queues_starts,
                        local_queues_sizes,
                        top_m_candidates,
                        top_m_candidates_size,
                        bound_ks);
                if (!top_m_candidates_size) {
                    break; // No unchecked candidate left anywhere: converged.
                }
                std::fill(nks.begin(), nks.end(), global_L);
                // Expand top-M vertices, one candidate per thread (static,1)
                // so candidate c_i always goes to thread c_i % num_threads_.
#pragma omp parallel for schedule(static, 1) reduction(+ : tmp_count_computation)
                for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) {
                    idi tid = omp_get_thread_num();
                    const idi set_L_start = local_queues_starts[tid];
                    idi &set_L_size = local_queues_sizes[tid];
                    idi &nk = nks[tid];
                    idi L_value = tid == 0 ? global_L : local_L;
                    idi cand_id = top_m_candidates[c_i];
                    idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                    idi out_degree = *out_edges++;
                    // Expand cand_id's neighbors.
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        { // Sequential edition
                            // NOTE(review): unsynchronized test-then-set on a
                            // shared bit array — formally a data race; the worst
                            // case is the same neighbor entering two queues,
                            // which the final merge deduplicates.
                            if (is_visited[nb_id]) {
                                continue;
                            }
                            is_visited[nb_id] = 1;
                        }
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
                        ++tmp_count_computation;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        if (dist > bound_lth) {
                            continue; // Farther than the global L-th best: prune.
                        }
                        Candidate cand(nb_id, dist, false);
                        idi r = add_into_queue(
                                set_L,
                                set_L_start,
                                set_L_size,
                                L_value,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                }
                top_m_candidates_size = 0;
                count_distance_computation_ += tmp_count_computation;
                tmp_count_computation = 0;
                // Resume each queue's scan from the earliest changed position.
                for (idi q_i = 0; q_i < num_queues; ++q_i) {
                    if (nks[q_i] < bound_ks[q_i]) {
                        ks[q_i] = nks[q_i];
                    } else {
                        ks[q_i] = bound_ks[q_i];
                    }
                }
                time_select_ -= WallTimer::get_time_mark();
                {// Select L-th distance over all queues as the new bound.
                    bound_lth = selecting_top_L_seq(
                            set_L,
                            global_L,
                            num_queues,
                            local_queues_starts,
                            local_queues_sizes);
                }
                time_select_ += WallTimer::get_time_mark();
                {// Scale M
                    if (local_M < local_M_max) {
                        local_M <<= 1;
                    }
                }
            }
        }
    }

    {// Return the results to set_K: K-way merge of the sorted local queues,
     // skipping duplicates of the previously emitted id.
        std::vector<idi> pointer(num_threads_, 0);
        // Get the first (overall closest) candidate among all queue heads.
        distf min_dist = FLT_MAX;
        idi min_q_i = 0; // Initialized: previously these were read
        idi min_id = 0;  // uninitialized in the tie-break comparison and, if
        idi min_sub = 0; // every queue were empty, in set_K[0] below (UB).
        idi last_id;
        for (int q_i = 0; q_i < num_threads_; ++q_i) {
            if (pointer[q_i] >= local_queues_sizes[q_i]) {
                continue;
            }
            idi sub = pointer[q_i] + local_queues_starts[q_i];
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            }
        }
        set_K[0] = set_L[min_sub].id_;
        ++pointer[min_q_i];
        last_id = set_K[0];
        bool is_finished = false;
        idi k_i = 1;
        while (k_i < K && !is_finished) {
            is_finished = true;
            min_dist = FLT_MAX;
            for (int q_i = 0; q_i < num_threads_; ++q_i) {
                const idi local_queue_size = local_queues_sizes[q_i];
                idi sub = pointer[q_i] + local_queues_starts[q_i];
                // Skip duplicates of the last emitted id.
                while (pointer[q_i] < local_queue_size
                       && set_L[sub].id_ == last_id) {
                    ++pointer[q_i];
                    ++sub;
                }
                if (pointer[q_i] >= local_queue_size) {
                    continue;
                }
                is_finished = false;
                distf tmp_dist = set_L[sub].distance_;
                idi tmp_id = set_L[sub].id_;
                if (tmp_dist < min_dist) {
                    min_dist = tmp_dist;
                    min_id = tmp_id;
                    min_q_i = q_i;
                    min_sub = sub;
                } else if (tmp_dist == min_dist && tmp_id < min_id) {
                    min_id = tmp_id;
                    min_q_i = q_i;
                    min_sub = sub;
                }
            }
            if (is_finished) {
                break; // All queues exhausted: previously a stale min_sub was
                       // still written into set_K[k_i] here.
            }
            set_K[k_i] = set_L[min_sub].id_;
            ++pointer[min_q_i];
            last_id = set_K[k_i]; // Track the latest emitted id; previously
                                  // last_id stayed set_K[0] forever, so only
                                  // duplicates of the first result were skipped.
            ++k_i;
        }
    }

    {// Reset per-query state for the next query.
        is_visited.reset();
        std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0);
    }
}
/*
 * 6/27/2020-12:33
 * Do searching on the local_set_L
 * local_set_L is already sorted
 * is_visited is already set up.
 *
 * Best-first greedy search restricted to one contiguous segment of set_L
 * (the segment [base_set_L, base_set_L + set_L_end)). Repeatedly takes the
 * closest unchecked candidate, expands its out-neighbors, and inserts
 * improving neighbors back into the same segment, until no unchecked
 * candidate remains among the first local_L positions.
 *
 * query_id: index of the query vector inside queries_load_.
 * local_L:  search width / capacity of this segment.
 * base_set_L: offset of this segment inside set_L.
 * set_L_end: in/out — current number of elements in the segment.
 * is_visited: shared visited-bit array; bits for the segment's initial
 *             contents must already be set by the caller.
 * local_count_distance_computation: out — accumulates the number of
 *             distance computations performed here.
 */
inline void Searching::subsearch_for_simple_search(
        const idi query_id,
        const idi local_L,
        std::vector<Candidate> &set_L,
        const idi base_set_L,
        idi &set_L_end,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &local_count_distance_computation)
{
    const dataf *query_data = queries_load_ + query_id * dimension_;
    idi k = 0; // Index of first unchecked candidate.
    idi iter = 0;
    while (k < local_L) {
        ++iter;
        // Select the top-1 unchecked candidate.
        idi top_1;
        idi last_k = local_L; // Sentinel: stays local_L if nothing is found.
        // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
        for (idi c_i = k; c_i < set_L_end; ++c_i) {
            idi index_set_L = c_i + base_set_L;
            if (set_L[index_set_L].is_checked_) {
                continue;
            }
            top_1 = set_L[index_set_L].id_;
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[index_set_L].is_checked_ = true;
            break;
        }
        if (last_k == local_L) {
            break; // No unchecked candidate left: converged.
        }
        idi nk = local_L; // Lowest position an insertion landed at this round.
        // Push top-1's neighbors into the queue.
        idi cand_id = top_1;
        _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
        idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
        idi out_degree = *out_edges++;
        for (idi n_i = 0; n_i < out_degree; ++n_i) {
            _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
        }
        for (idi e_i = 0; e_i < out_degree; ++e_i) {
            idi nb_id = out_edges[e_i];
            { // Sequential edition
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = 1;
            }
            auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
            dataf norm = *nb_data++;
            ++local_count_distance_computation;
            distf dist = compute_distance_with_norm(nb_data, query_data, norm);
            // Prune neighbors farther than the segment's current worst element.
            if (dist > set_L[set_L_end - 1 + base_set_L].distance_) {
                continue;
            }
            Candidate cand(nb_id, dist, false);
            // Insert into this segment's queue; r is the landing position.
            idi r = add_into_queue(
                    set_L,
                    base_set_L,
                    set_L_end,
                    local_L,
                    cand);
            if (r < nk) {
                nk = r;
            }
        }
        // Resume scanning from the earliest changed position, or just past
        // the candidate we expanded if nothing landed earlier.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }
}
/*
 * 6/27/2020-12:26
 * Is it good to let every thread run its own subsearch?
 *
 * Simple parallel search: the initial L candidates are split into
 * num_threads_ contiguous segments of set_L; each thread sorts its segment
 * and runs subsearch_for_simple_search on it independently; the segments
 * are then merged and the top-K distinct ids are copied into set_K.
 */
inline void Searching::para_simple_search_subsearch(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        boost::dynamic_bitset<> &is_visited)
{
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // Mark initial candidates visited.
        // Cannot use OMP here: is_visited is a bit array, concurrent bit
        // writes to the same word would race.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }

        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (unsigned i = 0; i < L; i++) {
            unsigned v_id = init_ids[i];
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[i] = Candidate(v_id, dist, false); // False means not checked.
        }
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
        // Note: set_L is NOT sorted here; each branch below sorts what it needs.
    }
    idi queue_end = L;
    // Searching
    if (num_threads_ == 1) { // Single thread: one sorted queue, one subsearch.
        std::sort(
                set_L.begin(),
                set_L.end());
        subsearch_for_simple_search(
                query_id,
                L,
                set_L,
                0,
                queue_end,
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
    } else { // Multiple threads
        const idi num_queues = num_threads_;
        // Ceiling division: segment length so num_queues segments cover L.
        const idi local_queue_length = (L - 1) / num_queues + 1;
        // Each thread sorts and searches its own segment independently.
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi local_queue_base = q_i * local_queue_length;
            if (local_queue_base >= L) {
                continue; // Segment starts past the data (L not divisible evenly).
            }
            idi local_queue_end = local_queue_length;
            if (local_queue_base + local_queue_end > L) {
                local_queue_end = L - local_queue_base; // Trim the last segment.
            }
            std::sort(
                    set_L.begin() + local_queue_base,
                    set_L.begin() + local_queue_base + local_queue_end);
            subsearch_for_simple_search(
                    query_id,
                    local_queue_end, // local_L
                    set_L,
                    local_queue_base, // base_set_L
                    local_queue_end, // set_L_end
                    is_visited,
                    tmp_count_computation);
        }
        count_distance_computation_ += tmp_count_computation;
        // Merge the sorted segments back into one sorted prefix of set_L.
        merge_in_set_L(
                set_L,
                L,
                num_queues,
                local_queue_length);
    }
    {// Return the results to set_K
        // Copy the top-K ids, skipping consecutive duplicates (duplicates can
        // appear across segments; set_L is sorted so they are adjacent).
        idi last_id = set_L[0].id_;
        set_K[0] = last_id;
        idi k_i = 1;
        idi l_i = 1;
        while (k_i < K && l_i < L) {
            if (last_id == set_L[l_i].id_) {
                ++l_i;
                continue;
            }
            last_id = set_L[l_i++].id_;
            set_K[k_i++] = last_id;
        }
    }
    {// Reset per-query state for the next query.
        is_visited.reset();
    }
}
///*
// * 6/22/2020-09:38
// * A synchronized last element as the sentinel
// */
//inline void Searching::para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
//{
//// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_ - 1] = L;
//
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// {// Local queues' ends
//// printf("query%u:iter: %u", query_id, tmp_count);
// idi total_elements = 0;
// for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) {
// total_elements += local_queues_ends[i_t];
// }
// number_local_elements_ += total_elements;
//// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]);
//// for (int i_t = 0; i_t < num_threads_; ++i_t) {
//// printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//// }
//// printf("\n");
// }
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
// if (r < nk) {
// nk = r;
// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// if (0 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/7/2020-16:55
// * Use 1 threads to scale M until the value_M_middle.
// * Then use multiple threads.
// * Except for Thread 0, other threads are collectors. They collect, but do not merge.
// * Only merge once after Thread 0 stops.
// */
//inline void Searching::para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi chunk_size;
// if (num_threads_ <= top_m_candidates_end) {
// chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1;
// } else {
// chunk_size = 1;
// }
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
////#pragma omp parallel for reduction(+ : tmp_count_computation)
//#pragma omp parallel for reduction(+ : tmp_count_computation) schedule(static, chunk_size)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
//// {
//// if (c_i < chunk_size && tid != 0) {
//// printf("query_id: %u "
//// "tmp_count: %u "
//// "chunk_size: %u "
//// "c_i: %u "
//// "tid: %u\n",
//// query_id,
//// tmp_count,
//// chunk_size,
//// c_i,
//// tid);
//// }
//// }
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
////// // Merge. Merge all queues in parallel.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// idi r = merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
//
//// // Merge only once after Master Thread stops.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/8/2020-16:39
// * Selecting rather than merging
// */
//inline void Searching::para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
//// while (k < L) {
// while (true) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// // Select M candidates
//// idi last_k = L;
////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
//// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
//// idi index_set_L = c_i + base_set_L;
//// if (set_L[index_set_L].is_checked_) {
//// continue;
//// }
//// last_k = c_i; // Record the location of the last candidate selected.
//// set_L[index_set_L].is_checked_ = true;
//// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
//// }
//
// // Select M candidates
// {
// idi traverse_count = 0;
// idi bound_sub = L; // This is not always true!
// for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) {
// for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) {
// if (sub >= local_queues_ends[tid]) {
// continue;
// }
// idi index_set_L = tid * local_queue_length + sub;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
// }
//
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
//// idi r =
// add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
//// if (r < nk) {
//// nk = r;
//// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
//// idi r = merge_all_queues_queue_base(
//// set_L,
//// local_queues_ends,
//// queue_base,
//// real_threads,
//// local_queue_length,
//// L);
//// idi r =
// merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
//// if (r < nk) {
//// nk = r;
//// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
//// if (nk <= last_k) {
//// k = nk;
//// } else {
//// k = last_k + 1;
//// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
//
////#pragma omp parallel for
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i + base_set_L].id_;
////// set_K[k_i] = set_L[k_i].id_;
//// }
//
// {
// idi k_i = 0;
// idi bound_sub = K / num_threads_;
// for (idi sub = 0; sub < bound_sub; ++sub) {
// for (int tid = 0; tid < num_threads_; ++tid) {
// idi index_set_L = tid * local_queue_length + sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// idi remain = K - k_i;
// if (remain) {
// for (int tid = 0; tid < remain; ++tid) {
// idi index_set_L = tid * local_queue_length + bound_sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
} // namespace PANNS
#endif //BATCH_SEARCHING_SEARCHING_H
|
GB_binop__isle_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int8)
// A*D function (colscale): GB (_AxD__isle_int8)
// D*A function (rowscale): GB (_DxB__isle_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int8)
// C=scalar+B GB (_bind1st__isle_int8)
// C=scalar+B' GB (_bind1st_tran__isle_int8)
// C=A+scalar GB (_bind2nd__isle_int8)
// C=A'+scalar GB (_bind2nd_tran__isle_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT8 || GxB_NO_ISLE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all 3 matrices are dense.  This kernel is compiled out by
// the surrounding #if 0: the generator emits it only when the operator is
// MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV, and ISLE is none of those.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense.  The loop itself lives in the
// included template, parameterized by the GB_* macros defined above
// (GB_BINOP applies cij = (aij <= bij)).
void GB (_Cdense_ewise3_noaccum__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, sliced into
// B_ntasks tasks over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__isle_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable; retained exactly as emitted by the code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// results into Cx via the colscale template.
GrB_Info GB (_AxD__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the
// results into Cx via the rowscale template.
GrB_Info GB (_DxB__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, applying the ISLE operator
// over the union of the patterns of A and B.
GrB_Info GB (_AaddB__isle_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// eWiseUnion substitutes alpha/beta for entries missing from A or B
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is sparse/hyper,
// applying ISLE over the intersection of the patterns of A and B.
GrB_Info GB (_AemultB_08__isle_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for ISLE, so only the non-flipped branch is compiled in.
GrB_Info GB (_AemultB_02__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; tasks are sliced over the mask M.
GrB_Info GB (_AemultB_04__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__isle_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for every entry present in B, with the scalar x
// bound as the first operand.
GrB_Info GB (_bind1st__isle_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ; // skip entries not present in the B bitmap
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for every entry present in A, with the scalar y
// bound as the second operand.
GrB_Info GB (_bind2nd__isle_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ; // skip entries not present in the A bitmap
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply the ISLE operator with the scalar x
// bound as the first operand (via GB_CAST_OP defined just above).
GrB_Info GB (_bind1st_tran__isle_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code compiled after this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply the ISLE operator with the scalar y
// bound as the second operand (via GB_CAST_OP defined just above).
GrB_Info GB (_bind2nd_tran__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
NeighborhoodGraph.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_NG_H_
#define _SPTAG_COMMON_NG_H_
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "Dataset.h"
#include "FineGrainedLock.h"
#include "QueryResultSet.h"
#include <algorithm>
#include <random>
namespace SPTAG
{
namespace COMMON
{
class NeighborhoodGraph
{
public:
// Default build parameters for the neighborhood graph.
NeighborhoodGraph(): m_iTPTNumber(32), // number of TP-trees used to seed candidates in BuildGraph
m_iTPTLeafSize(2000), // max span of a TP-tree leaf (see PartitionByTptree)
m_iSamples(1000), // samples used when estimating a split plane
m_numTopDimensionTPTSplit(5),
m_iNeighborhoodSize(32), // neighbors kept per node (scaled up during build)
m_iNeighborhoodScale(2), // temporary enlargement factor during build/refine
m_iCEFScale(2), // enlarges the refine candidate budget on early passes
m_iRefineIter(2), // number of refinement passes over the graph
m_iCEF(1000), // candidate count per RefineNode query
m_iAddCEF(500), // candidate count on insert -- presumably; not used in this view
m_iMaxCheckForRefineGraph(10000)
{}
~NeighborhoodGraph() {}
virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0;
virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0;
virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) = 0;
// Build the relative neighborhood graph (RNG) for every sample in the index.
// Small datasets (< 1000 vectors) go straight to RefineGraph; otherwise
// m_iTPTNumber TP-trees are built in parallel and every leaf contributes
// all-pairs candidate neighbors before the refinement passes run.
//
// idmap (optional) redirects vector ids: any pair endpoint found in the map
// is replaced by its mapped id before insertion.
template <typename T>
void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
{
std::cout << "build RNG graph!" << std::endl;
m_iGraphSize = index->GetNumSamples();
// Temporarily enlarge the neighborhood; RefineGraph shrinks it back.
m_iNeighborhoodSize = m_iNeighborhoodSize * m_iNeighborhoodScale;
m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize);
if (m_iGraphSize < 1000) {
RefineGraph<T>(index, idmap);
std::cout << "Build RNG Graph end!" << std::endl;
return;
}
{
COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize);
std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize));
std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>());
for (SizeType i = 0; i < m_iGraphSize; i++)
for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
(NeighborhoodDists)[i][j] = MaxDist;
std::cout << "Parallel TpTree Partition begin " << std::endl;
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < m_iTPTNumber; i++)
{
Sleep(i * 100);
// std::random_shuffle was deprecated in C++14 and removed in C++17;
// use std::shuffle with an explicitly seeded engine instead.  The seed
// mixes the clock with the tree id so each tree shuffles differently.
std::mt19937 rng(static_cast<unsigned>(clock()) + static_cast<unsigned>(i) * 0x9e3779b9u);
for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j;
std::shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end(), rng);
PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]);
std::cout << "Finish Getting Leaves for Tree " << i << std::endl;
}
std::cout << "Parallel TpTree Partition done" << std::endl;
for (int i = 0; i < m_iTPTNumber; i++)
{
#pragma omp parallel for schedule(dynamic)
for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++)
{
SizeType start_index = TptreeLeafNodes[i][j].first;
SizeType end_index = TptreeLeafNodes[i][j].second;
if (omp_get_thread_num() == 0) std::cout << "\rProcessing Tree " << i << ' ' << j * 100 / TptreeLeafNodes[i].size() << '%';
// All-pairs distances inside a leaf seed both endpoints' candidate lists.
for (SizeType x = start_index; x < end_index; x++)
{
for (SizeType y = x + 1; y <= end_index; y++)
{
SizeType p1 = TptreeDataIndices[i][x];
SizeType p2 = TptreeDataIndices[i][y];
float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2));
if (idmap != nullptr) {
p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1);
p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2);
}
COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize);
COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize);
}
}
}
// Release per-tree working memory as soon as the tree is consumed.
TptreeDataIndices[i].clear();
TptreeLeafNodes[i].clear();
std::cout << std::endl;
}
TptreeDataIndices.clear();
TptreeLeafNodes.clear();
}
RefineGraph<T>(index, idmap);
}
// Run m_iRefineIter refinement passes over every node's neighbor list.
// The first m_iRefineIter - 1 passes use an enlarged candidate budget
// (m_iCEF * m_iCEFScale) and the enlarged neighborhood set by BuildGraph;
// the final pass shrinks the neighborhood back and uses the normal budget.
template <typename T>
void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
{
for (int iter = 0; iter < m_iRefineIter - 1; iter++)
{
#pragma omp parallel for schedule(dynamic)
for (SizeType i = 0; i < m_iGraphSize; i++)
{
RefineNode<T>(index, i, false, false, m_iCEF * m_iCEFScale);
if (i % 1000 == 0) std::cout << "\rRefine " << iter << " " << static_cast<int>(i * 1.0 / m_iGraphSize * 100) << "%";
}
std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl;
}
// Final pass at the target neighborhood size.
m_iNeighborhoodSize /= m_iNeighborhoodScale;
#pragma omp parallel for schedule(dynamic)
for (SizeType i = 0; i < m_iGraphSize; i++)
{
RefineNode<T>(index, i, false, false, m_iCEF);
if (i % 1000 == 0) std::cout << "\rRefine " << (m_iRefineIter - 1) << " " << static_cast<int>(i * 1.0 / m_iGraphSize * 100) << "%";
}
std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl;
if (idmap != nullptr) {
// Negative idmap keys appear to encode redirected nodes: node (-1 - key)
// stores (-2 - value) in its last neighbor slot -- TODO confirm encoding.
for (auto iter = idmap->begin(); iter != idmap->end(); iter++)
if (iter->first < 0)
{
m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second;
}
}
}
// Build a refined graph over the subset `indices`, writing into `newGraph`
// (a temporary instance is created when newGraph is null).  Neighbor ids are
// translated back through `reverseIndices`, then optionally remapped through
// `idmap`.  If `output` is given the new graph is serialized to it.
template <typename T>
ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices,
std::ostream* output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
{
std::shared_ptr<NeighborhoodGraph> tmp;
if (newGraph == nullptr) {
// tmp keeps the instance alive for the duration of this call
tmp = NeighborhoodGraph::CreateInstance(Type());
newGraph = tmp.get();
}
SizeType R = (SizeType)indices.size();
newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize);
newGraph->m_iGraphSize = R;
newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize;
#pragma omp parallel for schedule(dynamic)
for (SizeType i = 0; i < R; i++)
{
if (i % 1000 == 0) std::cout << "\rRefine " << static_cast<int>(i * 1.0 / R * 100) << "%";
SizeType *outnodes = newGraph->m_pNeighborhoodGraph[i];
// Query around the original sample, then rebuild its neighbor list.
COMMON::QueryResultSet<T> query((const T*)index->GetSample(indices[i]), m_iCEF + 1);
index->RefineSearchIndex(query, false);
RebuildNeighbors(index, indices[i], outnodes, query.GetResults(), m_iCEF + 1);
std::unordered_map<SizeType, SizeType>::const_iterator iter;
for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
{
// Translate each neighbor id into the subset's id space, then apply idmap.
if (outnodes[j] >= 0 && outnodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[outnodes[j]];
if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second;
}
// Negative key (-1 - i) appears to mark a redirected node; its target is
// stored as (-2 - value) in the last slot -- TODO confirm encoding.
if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end())
outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second;
}
if (output != nullptr) newGraph->SaveGraph(*output);
return ErrorCode::Success;
}
// Re-run a bounded search (budget CEF) around 'node' and rebuild its
// neighbor list; optionally push 'node' back into each result's own
// neighborhood so the update is applied in both directions.
template <typename T>
void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted, int CEF)
{
    COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), CEF + 1);
    index->RefineSearchIndex(query, searchDeleted);
    RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), CEF + 1);
    if (!updateNeighbors) return;
    for (int pos = 0; pos <= CEF; pos++) {
        BasicResult* res = query.GetResult(pos);
        if (res->VID < 0) break;      // a negative VID ends the valid results
        if (res->VID != node)
            InsertNeighbors(index, res->VID, node, res->Dist);
    }
}
// Recursively partition indices[first..last] (inclusive) for one TPT tree.
// A segment no larger than m_iTPTLeafSize becomes a leaf; otherwise the
// points are split by a random linear projection over the highest-variance
// dimensions (projection chosen out of 100 random trials to maximize the
// projected variance), and both halves recurse. Only the first m_iSamples
// points of the segment are used to estimate mean/variance.
// NOTE(review): uses rand(), which relies on global libc state — presumably
// seeded by the caller; verify thread-safety expectations at call sites.
template <typename T>
void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last,
std::vector<std::pair<SizeType, SizeType>> & leaves)
{
if (last - first <= m_iTPTLeafSize)
{
leaves.emplace_back(first, last);
}
else
{
std::vector<float> Mean(index->GetFeatureDim(), 0);
int iIteration = 100;
SizeType end = min(first + m_iSamples, last);
SizeType count = end - first + 1;
// calculate the mean of each dimension
for (SizeType j = first; j <= end; j++)
{
const T* v = (const T*)index->GetSample(indices[j]);
for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
{
Mean[k] += v[k];
}
}
for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
{
Mean[k] /= count;
}
std::vector<BasicResult> Variance;
Variance.reserve(index->GetFeatureDim());
for (DimensionType j = 0; j < index->GetFeatureDim(); j++)
{
Variance.emplace_back(j, 0.0f);
}
// calculate the variance of each dimension
for (SizeType j = first; j <= end; j++)
{
const T* v = (const T*)index->GetSample(indices[j]);
for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
{
float dist = v[k] - Mean[k];
Variance[k].Dist += dist*dist;
}
}
// Sort dimensions by variance; the top m_numTopDimensionTPTSplit dimensions
// (taken from the tail after the sort) form the projection subspace.
std::sort(Variance.begin(), Variance.end(), COMMON::Compare);
std::vector<SizeType> indexs(m_numTopDimensionTPTSplit);
std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit);
float bestvariance = Variance[index->GetFeatureDim() - 1].Dist;
for (int i = 0; i < m_numTopDimensionTPTSplit; i++)
{
indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID;
bestweight[i] = 0;
}
// Initial candidate: project onto the single highest-variance dimension.
bestweight[0] = 1;
float bestmean = Mean[indexs[0]];
std::vector<float> Val(count);
// Try iIteration random unit-norm weight vectors; keep the one whose
// projection of the sample has the largest variance.
for (int i = 0; i < iIteration; i++)
{
float sumweight = 0;
for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
{
weight[j] = float(rand() % 10000) / 5000.0f - 1.0f;
sumweight += weight[j] * weight[j];
}
sumweight = sqrt(sumweight);
for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
{
weight[j] /= sumweight;
}
float mean = 0;
for (SizeType j = 0; j < count; j++)
{
Val[j] = 0;
const T* v = (const T*)index->GetSample(indices[first + j]);
for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
{
Val[j] += weight[k] * v[indexs[k]];
}
mean += Val[j];
}
mean /= count;
float var = 0;
for (SizeType j = 0; j < count; j++)
{
float dist = Val[j] - mean;
var += dist * dist;
}
if (var > bestvariance)
{
bestvariance = var;
bestmean = mean;
for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
{
bestweight[j] = weight[j];
}
}
}
// Two-pointer partition: points projecting below bestmean go left,
// the rest are swapped to the right.
SizeType i = first;
SizeType j = last;
// decide which child one point belongs
while (i <= j)
{
float val = 0;
const T* v = (const T*)index->GetSample(indices[i]);
for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
{
val += bestweight[k] * v[indexs[k]];
}
if (val < bestmean)
{
i++;
}
else
{
std::swap(indices[i], indices[j]);
j--;
}
}
// if all the points in the node are equal,equally split the node into 2
if ((i == first) || (i == last + 1))
{
i = (first + last + 1) / 2;
}
Mean.clear();
Variance.clear();
Val.clear();
indexs.clear();
weight.clear();
bestweight.clear();
PartitionByTptree<T>(index, indices, first, i - 1, leaves);
PartitionByTptree<T>(index, indices, i, last, leaves);
}
}
// Size in bytes reported by the underlying dataset storage.
inline std::uint64_t BufferSize() const
{
return m_pNeighborhoodGraph.BufferSize();
}
// Load the graph from a file; on success, cache the row (node) and
// column (neighborhood) counts from the loaded dataset.
bool LoadGraph(std::string sGraphFilename)
{
if (!m_pNeighborhoodGraph.Load(sGraphFilename)) return false;
m_iGraphSize = m_pNeighborhoodGraph.R();
m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
return true;
}
// Load the graph from an in-memory image; on success, cache row/column counts.
// Fix: mirror the file-based overload and propagate a failed Load() instead of
// unconditionally returning true (the return value was previously ignored).
bool LoadGraph(char* pGraphMemFile)
{
if (!m_pNeighborhoodGraph.Load(pGraphMemFile)) return false;
m_iGraphSize = m_pNeighborhoodGraph.R();
m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
return true;
}
// Serialize the graph to a binary file. Returns false if the file cannot
// be opened or the stream-based save reports failure.
bool SaveGraph(std::string sGraphFilename) const
{
std::cout << "Save " << m_pNeighborhoodGraph.Name() << " To " << sGraphFilename << std::endl;
std::ofstream output(sGraphFilename, std::ios::binary);
if (!output.is_open()) return false;
// Fix: propagate the stream-overload result instead of discarding it.
bool ok = SaveGraph(output);
output.close();
return ok;
}
// Serialization layout: graph size, neighborhood size, then one row of
// neighbor ids per node.
// NOTE(review): stream write errors are not checked — this always returns true.
bool SaveGraph(std::ostream& output) const
{
output.write((char*)&m_iGraphSize, sizeof(SizeType));
output.write((char*)&m_iNeighborhoodSize, sizeof(DimensionType));
for (SizeType i = 0; i < m_iGraphSize; i++)
output.write((char*)m_pNeighborhoodGraph[i], sizeof(SizeType) * m_iNeighborhoodSize);
std::cout << "Save " << m_pNeighborhoodGraph.Name() << " (" << m_iGraphSize << ", " << m_iNeighborhoodSize << ") Finish!" << std::endl;
return true;
}
// Grow the underlying dataset by 'num' rows; the cached graph size is only
// bumped when the underlying allocation succeeded.
inline ErrorCode AddBatch(SizeType num)
{
    const ErrorCode status = m_pNeighborhoodGraph.AddBatch(num);
    if (status == ErrorCode::Success) m_iGraphSize += num;
    return status;
}
// Row accessors: a row is the neighbor-id array of one node.
inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; }
inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; }
// Single-cell write guarded by the per-row lock.
void Update(SizeType row, DimensionType col, SizeType val) {
std::lock_guard<std::mutex> lock(m_dataUpdateLock[row]);
m_pNeighborhoodGraph[row][col] = val;
}
// Set the row count on both the dataset and the cached size.
inline void SetR(SizeType rows) {
m_pNeighborhoodGraph.SetR(rows);
m_iGraphSize = rows;
}
inline SizeType R() const { return m_iGraphSize; }
inline std::string Type() const { return m_pNeighborhoodGraph.Name(); }
// Factory for the concrete graph implementation named by 'type'.
static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type);
protected:
// Graph structure
SizeType m_iGraphSize; // number of rows (nodes) currently in the graph
COMMON::Dataset<SizeType> m_pNeighborhoodGraph; // per-node neighbor-id rows
FineGrainedLock m_dataUpdateLock; // row-indexed locks used by Update()
public:
// Build/refine tuning knobs: TPT partitioning, neighborhood sizing, and CEF budgets.
int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit;
DimensionType m_iNeighborhoodSize;
int m_iNeighborhoodScale, m_iCEFScale, m_iRefineIter, m_iCEF, m_iAddCEF, m_iMaxCheckForRefineGraph;
};
}
}
#endif
|
cryptsha512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* based on rawSHA256_fmt.c code and Drepper's spec at
* http://www.akkadia.org/drepper/SHA-crypt.txt
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* See code/comments in cryptsha256 for how and why this is being done. NOTE,
* we could limit ourselves to 15 byte password, and then only need 1 limb
* SHA512 SIMD logic. If we allow 2 limb logic then 79 byte passwords are max.
* this is better than cryptsha256, where if we only allowed 1 limb, then only
 * 3 byte passwords would have been max, and even at 2 limbs, 35 byte passwords
* are the longest we can do.
*
* Porting to SSE2, May 2015, JimF. A little harder than some, since we have to
* group and rearrange passwords based upon length. We must only run passwords
* of a specific block group size in 1 SSE_COEF_SHA512 bundle. If we later do
* PARA_SHA512, then each bundle of SSE_COEF_SHA512*PARA_SHA512 will have to be
* made up of passwords of same block group size.
*
* Here are the block sizes per password length. To be equal group size, all
* numbers for 2 passwords must be equal all the way across. So, password
* lengths of 0, 1, ... 15 are 1 group. 16..23 are another group. 24..31 are
* yet another, etc. There are 5 'groups' of lengths.
*
* Here is the raw block length data. Only first and last length for the group has been kept.
Len: cp pspc cspp ppc cpp psc csp pc
0 : 1 1 1 1 1 1 1 1
15 : 1 1 1 1 1 1 1 1
16 : 1 2 2 1 1 1 1 1
23 : 1 2 2 1 1 1 1 1
24 : 1 2 2 2 2 1 1 1
31 : 1 2 2 2 2 1 1 1
32 : 1 2 2 2 2 2 2 1
47 : 1 2 2 2 2 2 2 1
48 : 2 2 2 2 2 2 2 2
79 : 2 2 2 2 2 2 2 2
Source to make above table (made up to 90,but over 79 is 3 limbs)
#include <stdio.h>
int c=64, s=16;
int S(int sz) {
if (sz<=111) return 1;
else if (sz <= 111+128) return 2;
else return 3;
}
void proc(int p) {
int cp=p+c;
printf("%-2d : %d %d %d %d %d %d %d %d\n",
p,S(cp),S(cp+s+p),S(cp+s+p),S(cp+p),S(cp+p),S(cp+s),S(cp+s),S(cp));
}
void main(int argc, char **argv) {
int i;
if (argc==2) s=atoi(argv[1]);
printf("Len: cp pspc cspp ppc cpp psc csp pc (saltlen=%d)\n",s);
for (i = 0; i < 90; ++i)
proc(i);
}
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_cryptsha512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_cryptsha512);
#else
#include "arch.h"
//#undef SIMD_COEF_64
#include "sha2.h"
#define _GNU_SOURCE 1
#include <string.h>
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
#include <omp.h>
#endif
#include "memdbg.h"
// NOTE, in SSE mode, even if NOT in OMP, we may need to scale, quite a bit, due to needing
// to 'group' passwords differently, so that we have lengths which 'share' the same number
// of crypt block counts for each 'type'. We may want to scale as much as 128 or so, just
// to try to have better saturation. If we only had 8 passwords given to us, and they were
// one each of these lengths: 3 7 8 12 13 14 15 21, in theory, we could do this
// with only 2 SSE calls (SIMD_COEF_32==4 for SHA256). However, length 3 has to run by itself,
// length 7 by itself, 8 by itself, and the rest can run together, but there are 5 of them,
// so it takes two runs. So, instead of 2 runs, we have to do 5 runs. Not very efficient.
// however, if we have a lot more passwords to work with, we can re-arrange them, to run
// them in groups that all 'fit' together, and do so until we exhaust all from a given length
// range, then do all in the next range. Thus, until we get to the last set within a length
// range, we are doing a fully packed SSE run, and having a LOT less wasted space. This will
// get even more interesting, when we start doing OMP, but it should just be the same principle,
// preload more passwords, and group them, then run the OMP threads over a single length, then
// go to the next length, until done, trying to keep each thread running, and keeping each block
// of SSE data full, until the last in a range. We probably can simply build all the rearrangements,
// then let the threads go on ALL data, without caring about the length, since each thread will only
// be working on passwords in a single MMX buffer that all match, at any given moment.
#ifdef SIMD_COEF_64
#ifdef _OPENMP
#define SIMD_COEF_SCALE (32/SIMD_COEF_64)
#else
#define SIMD_COEF_SCALE (64/SIMD_COEF_64)
#endif
#else
#define SIMD_COEF_SCALE 1
#endif
#define FORMAT_LABEL "sha512crypt"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
// 79 is max length we can do in 2 SIMD limbs, so just make it 79 always.
#define PLAINTEXT_LENGTH 79
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + ((i)&7) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#endif
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
// these MUST be defined prior to loading cryptsha512_valid.h
#define BINARY_SIZE 64
#define SALT_LENGTH 16
#define CIPHERTEXT_LENGTH 86
#define __CRYPTSHA512_CREATE_PROPER_TESTS_ARRAY__
#include "cryptsha512_common.h"
#define BLKS MAX_KEYS_PER_CRYPT
/* This structure is 'pre-loaded' with the keyspace of all possible crypts which */
/* will be performed WITHIN the inner loop. There are 8 possible buffers that */
/* are used. They are cp, pspc, cspp, ppc, cpp, psc, csp, and pc, where p stands */
/* for the 'hash' built from the password (and it is the same length as the */
/* password), s stands for the hash built from the salt (same size as salt), and */
/* c stands for the crypt results from the prior loop. There are 8 possible */
/* buffer layouts listed, but they fall into a pattern that is 42 long (2*3*7) */
/* this structure encapsulates this. we build this buffer, after computing the */
/* s hash, the p hash, and the starting c values. Then, within the inner loop, */
/* we simply spin through this structure, calling the SHA512 code to do the work. */
/* NOTE, most of the time, there will be 1 block and 2 block crypts. As the */
/* the password length grows, the more 2 block crypts there are, thus slower */
/**/
/* for SSE only, but 'could' be done for sha2.c code (jtr sha2) */
/* This keyspace was changed, to be put into BE at the start, and then we never */
/* do any swapping, but keep it in BE format from that point on. To do this, we */
/* changed the pointers to be a pointer to the start of the block, AND an offset */
/* for SSE, we need a pointer to the start of the block[0], and the offset. The */
/* index needed will be known in the crypt_all. This means we need something */
/* similar to out GET_POS macros, but also for oSSL formats. */
/* To do this, we have to use the JtR sha2.c functions, since there is this func: */
/* sha512_hash_block(&CTX, data, int perform_endian_swap). So if we set the last */
/* param to 0, we can call this function, and it will avoid the byte swapping */
/* Precomputed keyspace for the inner loop: the 42-entry (2*3*7) repeating
 * pattern of SHA-crypt buffers, built once per candidate by LoadCryptStruct. */
typedef struct cryptloopstruct_t {
unsigned char buf[8*2*128*BLKS]; // will allocate to hold 42 2 block buffers (42 * 2 * 128) Reduced to only requiring 8*2*128
// now, the cryptstructs are on the stack within the crypt for loop, so we avoid allocation.
// and to avoid the single static variable, or a static array.
unsigned char *bufs[BLKS][42]; // points to the start of each 2 block buffer.
#ifdef SIMD_COEF_64
int offs[BLKS][42];
#endif
unsigned char *cptr[BLKS][42]; // points to where we copy the crypt pointer for next round.
// Round 0 points to somewhere in round 1's buffer, etc.
int datlen[42]; // if 1, then this is a small, only 1 block crypt. Some rounds for shorter passwords take only 1 crypt block.
// NOTE, datlen could be changed to a number, and then we could do > 2 block crypts. Would take a little
// more memory (and longer PW's certainly DO take more time), but it should work fine. It may be an issue
// especially when doing OMP, that the memory footprint of this 'hot' inner loop simply gets too big, and
// things slow down. For now, we are limiting ourselves to 35 byte password, which fits into 2 SHA512 buffers
} cryptloopstruct;
/* Per-candidate state, allocated in init(): lengths, key strings, and crypt results. */
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/* these 2 values are used in setup of the cryptloopstruct, AND to do our SHA512_Init() calls, in the inner loop */
static const unsigned char padding[256] = { 0x80, 0 /* 0,0,0,0.... */ };
#if !defined(JTR_INC_COMMON_CRYPTO_SHA2) && !defined (SIMD_COEF_64)
/* SHA-512 initial hash values H0..H7 (FIPS 180-4). */
static const uint64_t ctx_init[8] =
{0x6A09E667F3BCC908ULL,0xBB67AE8584CAA73BULL,0x3C6EF372FE94F82BULL,0xA54FF53A5F1D36F1ULL,0x510E527FADE682D1ULL,0x9B05688C2B3E6C1FULL,0x1F83D9ABFB41BD6BULL,0x5BE0CD19137E2179ULL};
#endif
/* Currently-set salt: byte length, round count, and raw salt bytes. */
static struct saltstruct {
unsigned int len;
unsigned int rounds;
unsigned char salt[SALT_LENGTH];
} *cur_salt;
/* Format init: size max_keys_per_crypt to the thread count and SIMD scaling,
 * then allocate the per-candidate buffers. */
static void init(struct fmt_main *self)
{
	int threads = 1;
	int max_crypts;
#ifdef _OPENMP
	threads = omp_get_max_threads() * OMP_SCALE;
#endif
	max_crypts = SIMD_COEF_SCALE * threads * MAX_KEYS_PER_CRYPT;
	self->params.max_keys_per_crypt = max_crypts;
	/* One extra slot beyond max_crypts: it holds a zero-length password,
	 * used to pad out partially-filled groups in MMX/SSE mode. */
	saved_len = mem_calloc(1 + max_crypts, sizeof(*saved_len));
	saved_key = mem_calloc(1 + max_crypts, sizeof(*saved_key));
	crypt_out = mem_calloc(1 + max_crypts, sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Store one candidate password and record its (truncated) length. */
static void set_key(char *key, int index)
{
saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate, NUL-terminated at its recorded length. */
static char *get_key(int index)
{
	char *key = saved_key[index];
	key[saved_len[index]] = 0;
	return key;
}
/*
These are the 8 types of buffers this algorithm uses:
cp
pspc
cspp
ppc
cpp
psc
csp
pc
*/
/*
 * Build the 42-entry precomputed buffer schedule for one candidate.
 *   index   - global candidate index (selects saved_len/crypt_out slot)
 *   idx     - lane within the SIMD bundle (always 0 in scalar builds)
 *   p_bytes - password-derived byte sequence (plen bytes)
 *   s_bytes - salt-derived byte sequence (cur_salt->len bytes)
 * Each of the 8 distinct layouts (cp, pspc, cspp, ppc, cpp, psc, csp, pc) is
 * materialized once with SHA-512 padding and the 2 trailing bit-length bytes
 * pre-filled; the remaining 34 schedule entries alias those first instances.
 * Fix: buf[21] (type pc) previously padded with tot_psc-2-len_pc (copy-paste
 * from the psc type), writing padding past the logical pc buffer; the bytes
 * actually hashed (datlen[21] = dlen_pc) are unchanged by this correction.
 */
static void LoadCryptStruct(cryptloopstruct *crypt_struct, int index, int idx, char *p_bytes, char *s_bytes) {
unsigned len_pc, len_ppsc, len_ppc, len_psc; // length of 'data'
unsigned tot_pc, tot_ppsc, tot_ppc, tot_psc; // length of entire block to crypt (128 or 256)
unsigned off_pc, off_pspc, off_ppc, off_psc; // offset to the crypt ptr for these 4 'types'.
unsigned dlen_pc, dlen_ppsc, dlen_ppc, dlen_psc; // is this 1 or 2 block (or actual len for CommonCrypto, since it uses SHA512_Final()
unsigned plen=saved_len[index];
unsigned char *cp = crypt_struct->buf;
cryptloopstruct *pstr = crypt_struct;
#ifdef SIMD_COEF_64
// in SSE mode, we FORCE every buffer to be 2 blocks, even if it COULD fit into 1.
// Then we simply use the 2 block SSE code.
unsigned char *next_cp;
cp += idx*2*128;
#endif
len_pc = plen + BINARY_SIZE;
len_ppsc = (plen<<1) + cur_salt->len + BINARY_SIZE;
len_ppc = (plen<<1) + BINARY_SIZE;
len_psc = plen + cur_salt->len + BINARY_SIZE;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
if (len_pc <=111) tot_pc =128; else tot_pc =256;
if (len_ppsc<=111) tot_ppsc=128; else tot_ppsc=256;
if (len_ppc <=111) tot_ppc =128; else tot_ppc =256;
if (len_psc <=111) tot_psc =128; else tot_psc =256;
dlen_pc =len_pc;
dlen_ppsc=len_ppsc;
dlen_ppc =len_ppc;
dlen_psc =len_psc;
#else
if (len_pc <=111) {tot_pc =128; dlen_pc =128;}else{tot_pc =256; dlen_pc =256; }
if (len_ppsc<=111) {tot_ppsc=128; dlen_ppsc=128;}else{tot_ppsc=256; dlen_ppsc=256; }
if (len_ppc <=111) {tot_ppc =128; dlen_ppc =128;}else{tot_ppc =256; dlen_ppc =256; }
if (len_psc <=111) {tot_psc =128; dlen_psc =128;}else{tot_psc =256; dlen_psc =256; }
#endif
off_pc = len_pc - BINARY_SIZE;
off_pspc = len_ppsc - BINARY_SIZE;
off_ppc = len_ppc - BINARY_SIZE;
off_psc = len_psc - BINARY_SIZE;
// Adjust cp for idx;
#ifdef SIMD_COEF_64
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[0] is a cp (First of this type)
pstr->bufs[idx][0] = pstr->cptr[idx][41] = cp;
// For fist element only, we DO copy in the c value.
memcpy(cp, crypt_out[index], BINARY_SIZE); cp += BINARY_SIZE;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[0] = dlen_pc;
memcpy(cp, padding, tot_pc-2-len_pc); cp += (tot_pc-len_pc);
pstr->bufs[idx][0][tot_pc-2] = (len_pc<<3)>>8;
pstr->bufs[idx][0][tot_pc-1] = (len_pc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[1] is a pspc (First of this type)
pstr->bufs[idx][1] = cp;
pstr->cptr[idx][0] = cp + off_pspc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE);
if (!idx) pstr->datlen[1] = dlen_ppsc;
memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc);
pstr->bufs[idx][1][tot_ppsc-2] = (len_ppsc<<3)>>8;
pstr->bufs[idx][1][tot_ppsc-1] = (len_ppsc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[2] is a cspp (First of this type)
pstr->bufs[idx][2] = pstr->cptr[idx][1] = cp;
cp += BINARY_SIZE;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[2] = dlen_ppsc;
memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc);
pstr->bufs[idx][2][tot_ppsc-2] = (len_ppsc<<3)>>8;
pstr->bufs[idx][2][tot_ppsc-1] = (len_ppsc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[3] is a ppc (First of this type)
pstr->bufs[idx][3] = cp;
pstr->cptr[idx][2] = cp + off_ppc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp +=(plen+BINARY_SIZE);
if (!idx) pstr->datlen[3] = dlen_ppc;
memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc);
pstr->bufs[idx][3][tot_ppc-2] = (len_ppc<<3)>>8;
pstr->bufs[idx][3][tot_ppc-1] = (len_ppc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[4] is a cspp (from 2)
pstr->bufs[idx][4] = pstr->cptr[idx][3] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[4] = dlen_ppsc;
// pstr->buf[5] is a pspc (from [1])
pstr->bufs[idx][5] = pstr->bufs[idx][1]; pstr->cptr[idx][4] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[5] = dlen_ppsc;
// pstr->buf[6] is a cpp (First of this type)
pstr->bufs[idx][6] = pstr->cptr[idx][5] = cp;
cp += BINARY_SIZE;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[6] = dlen_ppc;
memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc);
pstr->bufs[idx][6][tot_ppc-2] = (len_ppc<<3)>>8;
pstr->bufs[idx][6][tot_ppc-1] = (len_ppc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[07] psc (First of this type)
pstr->bufs[idx][7] = cp;
pstr->cptr[idx][6] = cp + off_psc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, s_bytes, cur_salt->len); cp += (cur_salt->len+BINARY_SIZE);
if (!idx) pstr->datlen[7] = dlen_psc;
memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc);
pstr->bufs[idx][7][tot_psc-2] = (len_psc<<3)>>8;
pstr->bufs[idx][7][tot_psc-1] = (len_psc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[08] cspp (from 2)
pstr->bufs[idx][8] = pstr->cptr[idx][7] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[8] = dlen_ppsc;
// pstr->buf[09] ppc (from 3)
pstr->bufs[idx][9] = pstr->bufs[idx][3]; pstr->cptr[idx][8] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[9] = dlen_ppc;
// pstr->buf[10] cspp (from 2)
pstr->bufs[idx][10] = pstr->cptr[idx][9] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[10] = dlen_ppsc;
// pstr->buf[11] pspc (from 1)
pstr->bufs[idx][11] = pstr->bufs[idx][1]; pstr->cptr[idx][10] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[11] = dlen_ppsc;
// pstr->buf[12] cpp (from 6)
pstr->bufs[idx][12] = pstr->cptr[idx][11] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[12] = dlen_ppc;
// pstr->buf[13] pspc (from 1)
pstr->bufs[idx][13] = pstr->bufs[idx][1]; pstr->cptr[idx][12] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[13] = dlen_ppsc;
// pstr->buf[14] csp (First of this type)
pstr->bufs[idx][14] = pstr->cptr[idx][13] = cp;
cp += BINARY_SIZE;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[14] = dlen_psc;
memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc);
pstr->bufs[idx][14][tot_psc-2] = (len_psc<<3)>>8;
pstr->bufs[idx][14][tot_psc-1] = (len_psc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[15] ppc (from 3)
pstr->bufs[idx][15] = pstr->bufs[idx][3]; pstr->cptr[idx][14] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[15] = dlen_ppc;
// pstr->buf[16] cspp (from 2)
pstr->bufs[idx][16] = pstr->cptr[idx][15] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[16] = dlen_ppsc;
// pstr->buf[17] pspc (from 1)
pstr->bufs[idx][17] = pstr->bufs[idx][1]; pstr->cptr[idx][16] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[17] = dlen_ppsc;
// pstr->buf[18] cpp (from 6)
pstr->bufs[idx][18] = pstr->cptr[idx][17] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[18] = dlen_ppc;
// pstr->buf[19] pspc (from 1)
pstr->bufs[idx][19] = pstr->bufs[idx][1]; pstr->cptr[idx][18] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[19] = dlen_ppsc;
// pstr->buf[20] cspp (from 2)
pstr->bufs[idx][20] = pstr->cptr[idx][19] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[20] = dlen_ppsc;
// pstr->buf[21] pc (First of this type)
pstr->bufs[idx][21] = cp;
pstr->cptr[idx][20] = cp + off_pc;
memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE);
if (!idx) pstr->datlen[21] = dlen_pc;
memcpy(cp, padding, tot_pc-2-len_pc); /* was tot_psc-2-len_pc: padded past the 1-block pc buffer */
pstr->bufs[idx][21][tot_pc-2] = (len_pc<<3)>>8;
pstr->bufs[idx][21][tot_pc-1] = (len_pc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[22] cspp (from 2)
pstr->bufs[idx][22] = pstr->cptr[idx][21] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[22] = dlen_ppsc;
// pstr->buf[23] pspc (from 1)
pstr->bufs[idx][23] = pstr->bufs[idx][1]; pstr->cptr[idx][22] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[23] = dlen_ppsc;
// pstr->buf[24] cpp (from 6)
pstr->bufs[idx][24] = pstr->cptr[idx][23] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[24] = dlen_ppc;
// pstr->buf[25] pspc (from 1)
pstr->bufs[idx][25] = pstr->bufs[idx][1]; pstr->cptr[idx][24] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[25] = dlen_ppsc;
// pstr->buf[26] cspp (from 2)
pstr->bufs[idx][26] = pstr->cptr[idx][25] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[26] = dlen_ppsc;
// pstr->buf[27] ppc (from 3)
pstr->bufs[idx][27] = pstr->bufs[idx][3]; pstr->cptr[idx][26] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[27] = dlen_ppc;
// pstr->buf[28] csp (from 14)
pstr->bufs[idx][28] = pstr->cptr[idx][27] = pstr->bufs[idx][14];
if (!idx) pstr->datlen[28] = dlen_psc;
// pstr->buf[29] pspc (from 1)
pstr->bufs[idx][29] = pstr->bufs[idx][1]; pstr->cptr[idx][28] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[29] = dlen_ppsc;
// pstr->buf[30] cpp (from 6)
pstr->bufs[idx][30] = pstr->cptr[idx][29] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[30] = dlen_ppc;
// pstr->buf[31] pspc (from 1)
pstr->bufs[idx][31] = pstr->bufs[idx][1]; pstr->cptr[idx][30] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[31] = dlen_ppsc;
// pstr->buf[32] cspp (from 2)
pstr->bufs[idx][32] = pstr->cptr[idx][31] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[32] = dlen_ppsc;
// pstr->buf[33] ppc (from 3)
pstr->bufs[idx][33] = pstr->bufs[idx][3]; pstr->cptr[idx][32] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[33] = dlen_ppc;
// pstr->buf[34] cspp (from 2)
pstr->bufs[idx][34] = pstr->cptr[idx][33] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[34] = dlen_ppsc;
// pstr->buf[35] psc (from 7)
pstr->bufs[idx][35] = pstr->bufs[idx][7]; pstr->cptr[idx][34] = pstr->cptr[idx][6];
if (!idx) pstr->datlen[35] = dlen_psc;
// pstr->buf[36] cpp (from 6)
pstr->bufs[idx][36] = pstr->cptr[idx][35] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[36] = dlen_ppc;
// pstr->buf[37] pspc (from 1)
pstr->bufs[idx][37] = pstr->bufs[idx][1]; pstr->cptr[idx][36] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[37] = dlen_ppsc;
// pstr->buf[38] cspp (from 2)
pstr->bufs[idx][38] = pstr->cptr[idx][37] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[38] = dlen_ppsc;
// pstr->buf[39] ppc (from 3)
pstr->bufs[idx][39] = pstr->bufs[idx][3]; pstr->cptr[idx][38] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[39] = dlen_ppc;
// pstr->buf[40] cspp (from 2)
pstr->bufs[idx][40] = pstr->cptr[idx][39] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[40] = dlen_ppsc;
// pstr->buf[41] pspc (from 1)
pstr->bufs[idx][41] = pstr->bufs[idx][1]; pstr->cptr[idx][40] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[41] = dlen_ppsc;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
int *MixOrder, tot_todo;
#ifdef SIMD_COEF_64
// group based upon size splits.
MixOrder = mem_calloc((count+6*MAX_KEYS_PER_CRYPT), sizeof(int));
{
static const int lens[17][6] = {
{0,24,48,88,89,90}, // 0 byte salt
{0,24,48,88,89,90}, // 1 byte salt
{0,23,24,46,48,87}, // 2 byte salt
{0,23,24,45,48,87}, // 3 byte salt
{0,22,24,44,48,86}, // 4 byte salt
{0,22,24,43,48,86}, // 5 byte salt
{0,21,24,42,48,85}, // 6 byte salt
{0,21,24,41,48,85}, // 7 byte salt
{0,20,24,40,48,84}, // 8 byte salt
{0,20,24,39,48,84}, // 9 byte salt
{0,19,24,38,48,83}, // 10 byte salt
{0,19,24,37,48,83}, // 11 byte salt
{0,18,24,36,48,82}, // 12 byte salt
{0,18,24,35,48,82}, // 13 byte salt
{0,17,24,34,48,81}, // 14 byte salt
{0,17,24,33,48,81}, // 15 byte salt
{0,16,24,32,48,80} };
int j;
tot_todo = 0;
saved_len[count] = 0; // point all 'tail' MMX buffer elements to this location.
for (j = 0; j < 5; ++j) {
for (index = 0; index < count; ++index) {
if (saved_len[index] >= lens[cur_salt->len][j] && saved_len[index] < lens[cur_salt->len][j+1])
MixOrder[tot_todo++] = index;
}
while (tot_todo % MAX_KEYS_PER_CRYPT)
MixOrder[tot_todo++] = count;
}
}
#else
// no need to mix. just run them one after the next, in any order.
MixOrder = mem_calloc(count, sizeof(int));
for (index = 0; index < count; ++index)
MixOrder[index] = index;
tot_todo = count;
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < tot_todo; index += MAX_KEYS_PER_CRYPT)
{
// portably align temp_result char * pointer machine word size.
union xx {
unsigned char c[BINARY_SIZE];
ARCH_WORD a[BINARY_SIZE/sizeof(ARCH_WORD)];
} u;
unsigned char *temp_result = u.c;
SHA512_CTX ctx;
SHA512_CTX alt_ctx;
size_t cnt;
int idx;
char *cp;
char p_bytes[PLAINTEXT_LENGTH+1];
char s_bytes[PLAINTEXT_LENGTH+1];
char tmp_cls[sizeof(cryptloopstruct)+MEM_ALIGN_SIMD];
cryptloopstruct *crypt_struct;
#ifdef SIMD_COEF_64
char tmp_sse_out[8*MAX_KEYS_PER_CRYPT*8+MEM_ALIGN_SIMD];
uint64_t *sse_out;
sse_out = (uint64_t *)mem_align(tmp_sse_out, MEM_ALIGN_SIMD);
#endif
crypt_struct = (cryptloopstruct *)mem_align(tmp_cls,MEM_ALIGN_SIMD);
for (idx = 0; idx < MAX_KEYS_PER_CRYPT; ++idx)
{
/* Prepare for the real work. */
SHA512_Init(&ctx);
/* Add the key string. */
SHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* The last part is the salt string. This must be at most 16
characters and it ends at the first `$' character (for
compatibility with existing implementations). */
SHA512_Update(&ctx, cur_salt->salt, cur_salt->len);
/* Compute alternate SHA512 sum with input KEY, SALT, and KEY. The
final result will be added to the first context. */
SHA512_Init(&alt_ctx);
/* Add key. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Add salt. */
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Add key again. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Now get result of this (64 bytes) and add it to the other
context. */
SHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &alt_ctx);
/* Add for any character in the key one byte of the alternate sum. */
for (cnt = saved_len[MixOrder[index+idx]]; cnt > BINARY_SIZE; cnt -= BINARY_SIZE)
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], cnt);
/* Take the binary representation of the length of the key and for every
1 add the alternate sum, for every 0 the key. */
for (cnt = saved_len[MixOrder[index+idx]]; cnt > 0; cnt >>= 1)
if ((cnt & 1) != 0)
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);
else
SHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Create intermediate result. */
SHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &ctx);
/* Start computation of P byte sequence. */
SHA512_Init(&alt_ctx);
/* For every character in the password add the entire password. */
for (cnt = 0; cnt < saved_len[MixOrder[index+idx]]; ++cnt)
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence P. */
cp = p_bytes;
for (cnt = saved_len[MixOrder[index+idx]]; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Start computation of S byte sequence. */
SHA512_Init(&alt_ctx);
/* repeat the following 16+A[0] times, where A[0] represents the
first byte in digest A interpreted as an 8-bit unsigned value */
for (cnt = 0; cnt < 16 + ((unsigned char*)crypt_out[MixOrder[index+idx]])[0]; ++cnt)
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence S. */
cp = s_bytes;
for (cnt = cur_salt->len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Repeatedly run the collected hash value through SHA512 to
burn CPU cycles. */
LoadCryptStruct(crypt_struct, MixOrder[index+idx], idx, p_bytes, s_bytes);
}
idx = 0;
#ifdef SIMD_COEF_64
for (cnt = 1; ; ++cnt) {
if (crypt_struct->datlen[idx]==256) {
unsigned char *cp = crypt_struct->bufs[0][idx];
SIMDSHA512body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);
SIMDSHA512body((__m128i *)&cp[128], sse_out, sse_out, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK|SSEi_RELOAD);
} else {
unsigned char *cp = crypt_struct->bufs[0][idx];
SIMDSHA512body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);
}
if (cnt == cur_salt->rounds)
break;
{
int j, k;
for (k = 0; k < MAX_KEYS_PER_CRYPT; ++k) {
uint64_t *o = (uint64_t *)crypt_struct->cptr[k][idx];
#if !ARCH_ALLOWS_UNALIGNED
if (!is_aligned(o, 8)) {
unsigned char *cp = (unsigned char*)o;
for (j = 0; j < 64; ++j)
*cp++ = ((unsigned char*)sse_out)[GETPOS(j, k)];
} else
#endif
for (j = 0; j < 8; ++j)
#if ARCH_LITTLE_ENDIAN==1
*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);
#else
*o++ = sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64];
#endif
}
}
if (++idx == 42)
idx = 0;
}
{
int j, k;
for (k = 0; k < MAX_KEYS_PER_CRYPT; ++k) {
uint64_t *o = (uint64_t *)crypt_out[MixOrder[index+k]];
for (j = 0; j < 8; ++j)
#if ARCH_LITTLE_ENDIAN==1
*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);
#else
*o++ = sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64];
#endif
}
}
#else
SHA512_Init(&ctx);
for (cnt = 1; ; ++cnt) {
// calling with 128 byte, or 256 byte always, will force the update to properly crypt the data.
// NOTE the data is fully formed. It ends in a 0x80, is padded with nulls, AND has bit appended.
SHA512_Update(&ctx, crypt_struct->bufs[0][idx], crypt_struct->datlen[idx]);
if (cnt == cur_salt->rounds)
break;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Final(crypt_struct->cptr[0][idx], &ctx);
#else // !defined JTR_INC_COMMON_CRYPTO_SHA2, so it is oSSL, or generic
#if ARCH_LITTLE_ENDIAN
{
int j;
uint64_t *o = (uint64_t *)crypt_struct->cptr[0][idx];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(ctx.h[j]);
}
#else
memcpy(crypt_struct->cptr[0][idx], ctx.h, BINARY_SIZE);
#endif
#endif
if (++idx == 42)
idx = 0;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Init(&ctx);
#else
// this memcpy is 'good enough', used instead of SHA512_Init()
memcpy(ctx.h, ctx_init, sizeof(ctx_init));
#endif
}
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Final((unsigned char*)crypt_out[MixOrder[index]], &ctx);
#else
#if ARCH_LITTLE_ENDIAN
{
int j;
uint64_t *o = (uint64_t *)crypt_out[MixOrder[index]];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(ctx.h[j]);
}
#else
memcpy(crypt_out[MixOrder[index]], ctx.h, BINARY_SIZE);
#endif
#endif
#endif
}
MEM_FREE(MixOrder);
return count;
}
/* Install the salt used by subsequent crypt_all() calls.  SALT points at a
   struct saltstruct previously returned by get_salt().  */
static void set_salt(void *salt)
{
	cur_salt = salt;
}
/* Parse the salt portion of a $6$ ciphertext into a static saltstruct.
   Handles the optional "rounds=N$" prefix (clamped to [ROUNDS_MIN,
   ROUNDS_MAX]) and copies at most SALT_LENGTH salt bytes.  The returned
   pointer refers to static storage, overwritten on each call.  */
static void *get_salt(char *ciphertext)
{
	static struct saltstruct out;
	int len;

	memset(&out, 0, sizeof(out));
	out.rounds = ROUNDS_DEFAULT;
	ciphertext += FORMAT_TAG_LEN;

	/* Optional explicit iteration count: "rounds=<n>$".  */
	if (!strncmp(ciphertext, ROUNDS_PREFIX, sizeof(ROUNDS_PREFIX) - 1)) {
		const char *num = ciphertext + sizeof(ROUNDS_PREFIX) - 1;
		char *endp;
		unsigned long int srounds = strtoul(num, &endp, 10);

		if (*endp == '$') {
			ciphertext = endp + 1;
			if (srounds < ROUNDS_MIN)
				srounds = ROUNDS_MIN;
			out.rounds = (srounds > ROUNDS_MAX) ?
				ROUNDS_MAX : srounds;
		}
	}

	/* Salt runs up to the next '$' (valid() guarantees one exists).  */
	for (len = 0; ciphertext[len] != '$'; len++)
		;
	if (len > SALT_LENGTH)
		len = SALT_LENGTH;
	memcpy(out.salt, ciphertext, len);
	out.len = len;
	return &out;
}
/* Batch pre-filter: return 1 as soon as some computed digest matches BINARY
   in its first ARCH_SIZE bytes, 0 if none of COUNT candidates do.  */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (!memcmp(binary, crypt_out[i], ARCH_SIZE))
			return 1;
	return 0;
}
/* Compare the full BINARY_SIZE digest of candidate INDEX against BINARY.  */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* Nothing left to verify: cmp_one() already compared the entire
   BINARY_SIZE digest, so a match there is exact.  */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Report the tunable "iteration count" cost of SALT (a saltstruct).  */
static unsigned int sha512crypt_iterations(void *salt)
{
	return (unsigned int)((struct saltstruct *)salt)->rounds;
}
// Public domain hash function by DJ Bernstein (djb2, xor variant).
// The entire salt struct is hashed byte-by-byte, then folded into the
// salt hash table size.
static int salt_hash(void *salt)
{
	const unsigned char *p = salt;
	unsigned int h = 5381;
	unsigned int i;

	for (i = 0; i < SALT_SIZE; i++)
		h = h * 33 ^ p[i];	/* same as ((h << 5) + h) ^ p[i] */
	return h & (SALT_HASH_SIZE - 1);
}
/* John the Ripper format descriptor for sha512crypt ($6$).  The first
   nested block is the static parameters, the second the method table;
   initializers are positional, so the order must match struct fmt_main
   exactly (see formats.h).  */
struct fmt_main fmt_cryptsha512 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"SHA512 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,	/* presumably plaintext min length — confirm against formats.h */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",	/* tunable cost, value via sha512crypt_iterations */
		},
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			sha512crypt_iterations,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,	/* NOTE(review): likely the salt_compare slot — verify */
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
/* get_hash_0..6 are pulled in from the shared header below.  */
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
gimplify.c | /* Tree lowering pass. This pass converts the GENERIC functions-as-trees
tree representation into the GIMPLE form.
Copyright (C) 2002-2020 Free Software Foundation, Inc.
Major work done by Sebastian Pop <s.pop@laposte.net>,
Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "tm_p.h"
#include "gimple.h"
#include "gimple-predict.h"
#include "tree-pass.h" /* FIXME: only for PROP_gimple_any */
#include "ssa.h"
#include "cgraph.h"
#include "tree-pretty-print.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "calls.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "tree-cfg.h"
#include "tree-ssa.h"
#include "omp-general.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "gomp-constants.h"
#include "splay-tree.h"
#include "gimple-walk.h"
#include "langhooks-def.h" /* FIXME: for lhd_set_decl_assembler_name */
#include "builtins.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
#include "dbgcnt.h"
#include "omp-offload.h"
#include "context.h"
/* Hash set of poisoned variables in a bind expr. */
static hash_set<tree> *asan_poisoned_variables = NULL;
/* Per-variable data-sharing flags recorded while gimplifying OpenMP /
   OpenACC constructs.  Low bits classify the clause kind; higher bits are
   modifiers, several only meaningful alongside a specific kind (noted).
   Each flag must occupy a distinct bit.  */
enum gimplify_omp_var_data
{
  GOVD_SEEN = 0x000001,
  GOVD_EXPLICIT = 0x000002,
  GOVD_SHARED = 0x000004,
  GOVD_PRIVATE = 0x000008,
  GOVD_FIRSTPRIVATE = 0x000010,
  GOVD_LASTPRIVATE = 0x000020,
  GOVD_REDUCTION = 0x000040,
  GOVD_LOCAL = 0x000080,
  GOVD_MAP = 0x000100,
  GOVD_DEBUG_PRIVATE = 0x000200,
  GOVD_PRIVATE_OUTER_REF = 0x000400,
  GOVD_LINEAR = 0x000800,
  GOVD_ALIGNED = 0x001000,

  /* Flag for GOVD_MAP: don't copy back.  */
  GOVD_MAP_TO_ONLY = 0x002000,

  /* Flag for GOVD_LINEAR or GOVD_LASTPRIVATE: no outer reference.  */
  GOVD_LINEAR_LASTPRIVATE_NO_OUTER = 0x004000,

  GOVD_MAP_0LEN_ARRAY = 0x008000,

  /* Flag for GOVD_MAP, if it is always, to or always, tofrom mapping.  */
  GOVD_MAP_ALWAYS_TO = 0x010000,

  /* Flag for shared vars that are or might be stored to in the region.  */
  GOVD_WRITTEN = 0x020000,

  /* Flag for GOVD_MAP, if it is a forced mapping.  */
  GOVD_MAP_FORCE = 0x040000,

  /* Flag for GOVD_MAP: must be present already.  */
  GOVD_MAP_FORCE_PRESENT = 0x080000,

  /* Flag for GOVD_MAP: only allocate.  */
  GOVD_MAP_ALLOC_ONLY = 0x100000,

  /* Flag for GOVD_MAP: only copy back.  */
  GOVD_MAP_FROM_ONLY = 0x200000,

  GOVD_NONTEMPORAL = 0x400000,

  /* Flag for GOVD_LASTPRIVATE: conditional modifier.  */
  GOVD_LASTPRIVATE_CONDITIONAL = 0x800000,

  GOVD_CONDTEMP = 0x1000000,

  /* Flag for GOVD_REDUCTION: inscan seen in {in,ex}clusive clause.  */
  GOVD_REDUCTION_INSCAN = 0x2000000,

  /* Flag for GOVD_MAP: (struct) vars that have pointer attachments for
     fields.  Bug fix: this was written as decimal 8388608 (== 0x800000),
     which collided with GOVD_LASTPRIVATE_CONDITIONAL; the next free bit
     is 0x4000000.  */
  GOVD_MAP_HAS_ATTACHMENTS = 0x4000000,

  /* Mask of the bits that assign a data-sharing class to a variable.  */
  GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
			   | GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LINEAR
			   | GOVD_LOCAL)
};
/* Kind of OpenMP/OpenACC region being gimplified.  Base regions occupy
   distinct bits; the low bits 1/2/4 encode combined or variant forms of
   the same base region (e.g. ORT_COMBINED_PARALLEL = ORT_PARALLEL | 1).  */
enum omp_region_type
{
  ORT_WORKSHARE = 0x00,
  ORT_TASKGROUP = 0x01,
  ORT_SIMD = 0x04,

  ORT_PARALLEL = 0x08,
  ORT_COMBINED_PARALLEL = ORT_PARALLEL | 1,

  ORT_TASK = 0x10,
  ORT_UNTIED_TASK = ORT_TASK | 1,
  ORT_TASKLOOP = ORT_TASK | 2,
  ORT_UNTIED_TASKLOOP = ORT_UNTIED_TASK | 2,

  ORT_TEAMS = 0x20,
  ORT_COMBINED_TEAMS = ORT_TEAMS | 1,
  ORT_HOST_TEAMS = ORT_TEAMS | 2,
  ORT_COMBINED_HOST_TEAMS = ORT_COMBINED_TEAMS | 2,

  /* Data region.  */
  ORT_TARGET_DATA = 0x40,

  /* Data region with offloading.  */
  ORT_TARGET = 0x80,
  ORT_COMBINED_TARGET = ORT_TARGET | 1,
  ORT_IMPLICIT_TARGET = ORT_TARGET | 2,

  /* OpenACC variants.  */
  ORT_ACC = 0x100,  /* A generic OpenACC region.  */
  ORT_ACC_DATA = ORT_ACC | ORT_TARGET_DATA,  /* Data construct.  */
  ORT_ACC_PARALLEL = ORT_ACC | ORT_TARGET,  /* Parallel construct */
  ORT_ACC_KERNELS = ORT_ACC | ORT_TARGET | 2,  /* Kernels construct.  */
  ORT_ACC_SERIAL = ORT_ACC | ORT_TARGET | 4,  /* Serial construct.  */
  ORT_ACC_HOST_DATA = ORT_ACC | ORT_TARGET_DATA | 2,  /* Host data.  */

  /* Dummy OpenMP region, used to disable expansion of
     DECL_VALUE_EXPRs in taskloop pre body.  */
  ORT_NONE = 0x200
};
/* Gimplify hashtable helper: hashes elt_t entries (the val/temp pairs
   used by lookup_tmp_var's formal-temporary table); entries are plain
   malloc'd pointers freed via free_ptr_hash.  */

struct gimplify_hasher : free_ptr_hash <elt_t>
{
  static inline hashval_t hash (const elt_t *);
  static inline bool equal (const elt_t *, const elt_t *);
};
/* Per-invocation state of the gimplifier; kept on a stack of contexts
   managed by push_gimplify_context / pop_gimplify_context.  */
struct gimplify_ctx
{
  struct gimplify_ctx *prev_context;	/* Enclosing context, or NULL.  */

  vec<gbind *> bind_expr_stack;		/* Currently open GIMPLE_BINDs.  */
  tree temps;				/* Chain of temporaries created
					   in this context.  */
  gimple_seq conditional_cleanups;	/* Cleanups queued while inside a
					   COND_EXPR; flushed when the last
					   condition is popped.  */
  tree exit_label;
  tree return_temp;

  vec<tree> case_labels;
  hash_set<tree> *live_switch_vars;
  /* The formal temporary table.  Should this be persistent?  */
  hash_table<gimplify_hasher> *temp_htab;

  int conditions;			/* Depth of nested COND_EXPRs.  */
  unsigned into_ssa : 1;		/* Create SSA names for temps.  */
  unsigned allow_rhs_cond_expr : 1;
  unsigned in_cleanup_point_expr : 1;
  unsigned keep_stack : 1;
  unsigned save_stack : 1;
  unsigned in_switch_expr : 1;
};
/* Variable categories used to index gimplify_omp_ctx::defaultmap
   (the per-category implicit mapping behavior).  */
enum gimplify_defaultmap_kind
{
  GDMK_SCALAR,
  GDMK_AGGREGATE,
  GDMK_ALLOCATABLE,
  GDMK_POINTER
};
/* State for one OpenMP/OpenACC construct being gimplified; contexts nest
   via outer_context (see new_omp_context / delete_omp_context).  */
struct gimplify_omp_ctx
{
  struct gimplify_omp_ctx *outer_context;	/* Enclosing construct.  */
  splay_tree variables;		/* Per-variable GOVD_* flags, keyed by
				   DECL_UID (splay_tree_compare_decl_uid).  */
  hash_set<tree> *privatized_types;
  tree clauses;
  /* Iteration variables in an OMP_FOR.  */
  vec<tree> loop_iter_var;
  location_t location;
  enum omp_clause_default_kind default_kind;	/* default(...) clause.  */
  enum omp_region_type region_type;
  enum tree_code code;
  bool combined_loop;
  bool distribute;
  bool target_firstprivatize_array_bases;
  bool add_safelen1;
  bool order_concurrent;
  bool has_depend;
  bool in_for_exprs;
  int defaultmap[4];	/* Indexed by enum gimplify_defaultmap_kind.  */
};
static struct gimplify_ctx *gimplify_ctxp;
static struct gimplify_omp_ctx *gimplify_omp_ctxp;
static bool in_omp_construct;
/* Forward declaration. */
static enum gimplify_status gimplify_compound_expr (tree *, gimple_seq *, bool);
static hash_map<tree, tree> *oacc_declare_returns;
static enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
bool (*) (tree), fallback_t, bool);
/* Shorter alias name for the above function for use in gimplify.c
   only: append GS to *SEQ_P without updating def/use chains.  */

static inline void
gimplify_seq_add_stmt (gimple_seq *seq_p, gimple *gs)
{
  gimple_seq_add_stmt_without_update (seq_p, gs);
}
/* Append sequence SRC to the end of sequence *DST_P.  If *DST_P is
   NULL, a new sequence is allocated.  Similar to gimple_seq_add_seq,
   but does not scan the operands: during gimplification statement
   sequences must be manipulated before the def/use vectors exist.  */

static void
gimplify_seq_add_seq (gimple_seq *dst_p, gimple_seq src)
{
  if (src == NULL)
    return;

  gimple_stmt_iterator si = gsi_last (*dst_p);
  gsi_insert_seq_after_without_update (&si, src, GSI_NEW_STMT);
}
/* Pointer to a list of allocated gimplify_ctx structs to be used for pushing
and popping gimplify contexts. */
static struct gimplify_ctx *ctx_pool = NULL;
/* Return a zeroed gimplify context struct, reusing one from the free
   pool when available and allocating otherwise.  */

static inline struct gimplify_ctx *
ctx_alloc (void)
{
  struct gimplify_ctx *c = ctx_pool;

  if (c != NULL)
    ctx_pool = c->prev_context;
  else
    c = XNEW (struct gimplify_ctx);

  memset (c, 0, sizeof (*c));
  return c;
}
/* Put gimplify context C back into the pool for reuse by a later
   ctx_alloc; memory is only released by free_gimplify_stack.  */

static inline void
ctx_free (struct gimplify_ctx *c)
{
  c->prev_context = ctx_pool;
  ctx_pool = c;
}
/* Release every context held in the ctx free pool.  */

void
free_gimplify_stack (void)
{
  for (struct gimplify_ctx *c = ctx_pool; c != NULL; c = ctx_pool)
    {
      ctx_pool = c->prev_context;
      free (c);
    }
}
/* Enter a new gimplifier context.  IN_SSA and RHS_COND_OK seed the
   into_ssa and allow_rhs_cond_expr flags respectively.  */

void
push_gimplify_context (bool in_ssa, bool rhs_cond_ok)
{
  struct gimplify_ctx *c = ctx_alloc ();

  c->prev_context = gimplify_ctxp;
  c->into_ssa = in_ssa;
  c->allow_rhs_cond_expr = rhs_cond_ok;
  gimplify_ctxp = c;
}
/* Tear down a context for the gimplifier.  If BODY is non-null, then
   put the temporaries into the outer BIND_EXPR.  Otherwise, put them
   in the local_decls.

   BODY is not a sequence, but the first tuple in a sequence.  */

void
pop_gimplify_context (gimple *body)
{
  struct gimplify_ctx *c = gimplify_ctxp;

  /* Every GIMPLE_BIND opened in this context must have been closed.  */
  gcc_assert (c
	      && (!c->bind_expr_stack.exists ()
		  || c->bind_expr_stack.is_empty ()));
  c->bind_expr_stack.release ();

  gimplify_ctxp = c->prev_context;

  if (body)
    declare_vars (c->temps, body, false);
  else
    record_vars (c->temps);

  /* The formal temp table is per-context; drop it before recycling C.  */
  delete c->temp_htab;
  c->temp_htab = NULL;
  ctx_free (c);
}
/* Push a GIMPLE_BIND tuple onto the stack of bindings.  */

static void
gimple_push_bind_expr (gbind *bind_stmt)
{
  /* Grow in chunks to limit reallocations of the stack vector.  */
  gimplify_ctxp->bind_expr_stack.reserve (8);
  gimplify_ctxp->bind_expr_stack.safe_push (bind_stmt);
}

/* Pop the first element off the stack of bindings.  */

static void
gimple_pop_bind_expr (void)
{
  gimplify_ctxp->bind_expr_stack.pop ();
}

/* Return the first element of the stack of bindings, i.e. the
   innermost currently-open GIMPLE_BIND.  */

gbind *
gimple_current_bind_expr (void)
{
  return gimplify_ctxp->bind_expr_stack.last ();
}

/* Return the stack of bindings created during gimplification.  */

vec<gbind *>
gimple_bind_expr_stack (void)
{
  return gimplify_ctxp->bind_expr_stack;
}
/* Return true iff there is a COND_EXPR between us and the innermost
   CLEANUP_POINT_EXPR.  This info is used by gimple_push_cleanup.  */

static bool
gimple_conditional_context (void)
{
  return gimplify_ctxp->conditions > 0;
}

/* Note that we've entered a COND_EXPR.  */

static void
gimple_push_condition (void)
{
#ifdef ENABLE_GIMPLE_CHECKING
  /* At unconditional scope no conditional cleanups may be pending.  */
  if (gimplify_ctxp->conditions == 0)
    gcc_assert (gimple_seq_empty_p (gimplify_ctxp->conditional_cleanups));
#endif
  ++(gimplify_ctxp->conditions);
}

/* Note that we've left a COND_EXPR.  If we're back at unconditional scope
   now, add any conditional cleanups we've seen to the prequeue.  */

static void
gimple_pop_condition (gimple_seq *pre_p)
{
  int conds = --(gimplify_ctxp->conditions);

  gcc_assert (conds >= 0);
  if (conds == 0)
    {
      /* Flush cleanups accumulated while conditional into *PRE_P.  */
      gimplify_seq_add_seq (pre_p, gimplify_ctxp->conditional_cleanups);
      gimplify_ctxp->conditional_cleanups = NULL;
    }
}
/* A stable comparison routine for use with splay trees and DECLs.
   NOTE(review): the plain subtraction assumes DECL_UIDs stay small
   enough that the difference cannot overflow int — confirm against
   DECL_UID allocation.  */

static int
splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb)
{
  tree a = (tree) xa;
  tree b = (tree) xb;

  return DECL_UID (a) - DECL_UID (b);
}
/* Create a new omp construct context that deals with variable remapping;
   it nests inside the current gimplify_omp_ctxp (not installed here).  */

static struct gimplify_omp_ctx *
new_omp_context (enum omp_region_type region_type)
{
  struct gimplify_omp_ctx *c = XCNEW (struct gimplify_omp_ctx);

  c->outer_context = gimplify_omp_ctxp;
  c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
  c->privatized_types = new hash_set<tree>;
  c->location = input_location;
  c->region_type = region_type;
  /* Tasks default to unspecified sharing; everything else to shared.  */
  c->default_kind = ((region_type & ORT_TASK) != 0
		     ? OMP_CLAUSE_DEFAULT_UNSPECIFIED
		     : OMP_CLAUSE_DEFAULT_SHARED);
  /* All GDMK_* categories start out as plain GOVD_MAP.  */
  for (int i = GDMK_SCALAR; i <= GDMK_POINTER; i++)
    c->defaultmap[i] = GOVD_MAP;

  return c;
}
/* Destroy an omp construct that deals with variable remapping:
   release the variable flag tree, the privatized-type set and the
   loop iteration vector, then the context itself.  */

static void
delete_omp_context (struct gimplify_omp_ctx *c)
{
  splay_tree_delete (c->variables);
  delete c->privatized_types;
  c->loop_iter_var.release ();
  XDELETE (c);
}
static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int);
static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool);
/* Both gimplify the statement T and append it to *SEQ_P.  This function
   behaves exactly as gimplify_stmt, but you don't have to pass T as a
   reference.  */

void
gimplify_and_add (tree t, gimple_seq *seq_p)
{
  gimplify_stmt (&t, seq_p);
}
/* Gimplify statement T into sequence *SEQ_P, and return the first
   tuple in the sequence of generated tuples for this statement.
   Return NULL if gimplifying T produced no tuples.  */

static gimple *
gimplify_and_return_first (tree t, gimple_seq *seq_p)
{
  /* Remember the old tail so we can find where the new tuples begin.  */
  gimple_stmt_iterator last = gsi_last (*seq_p);

  gimplify_and_add (t, seq_p);

  if (!gsi_end_p (last))
    {
      /* *SEQ_P was non-empty before: the first generated tuple is the
	 one right after the old tail.  */
      gsi_next (&last);
      return gsi_stmt (last);
    }
  else
    /* *SEQ_P was empty, so whatever was added starts the sequence.  */
    return gimple_seq_first_stmt (*seq_p);
}
/* Returns true iff T is a valid RHS for an assignment to an un-renamed
   LHS, or for a call argument.  For renamable (register) types either
   source or dest must be a renamed variable, so only a plain value is
   acceptable; otherwise an lvalue also qualifies.  */

static bool
is_gimple_mem_rhs (tree t)
{
  return (is_gimple_reg_type (TREE_TYPE (t))
	  ? is_gimple_val (t)
	  : (is_gimple_val (t) || is_gimple_lvalue (t)));
}
/* Return true if T is a CALL_EXPR or an expression that can be
   assigned to a temporary.  Note that this predicate should only be
   used during gimplification.  See the rationale for this in
   gimplify_modify_expr.  */

static bool
is_gimple_reg_rhs_or_call (tree t)
{
  return (get_gimple_rhs_class (TREE_CODE (t)) != GIMPLE_INVALID_RHS
	  || TREE_CODE (t) == CALL_EXPR);
}
/* Return true if T is a valid memory RHS or a CALL_EXPR.  Note that
   this predicate should only be used during gimplification.  See the
   rationale for this in gimplify_modify_expr.  */

static bool
is_gimple_mem_rhs_or_call (tree t)
{
  /* For a renamable (register) type, either source or dest must be a
     renamed variable, so only a plain value qualifies.  */
  if (is_gimple_reg_type (TREE_TYPE (t)))
    return is_gimple_val (t);

  return (is_gimple_val (t)
	  || is_gimple_lvalue (t)
	  || TREE_CLOBBER_P (t)
	  || TREE_CODE (t) == CALL_EXPR);
}
/* Create a temporary with a name derived from VAL.  Subroutine of
   lookup_tmp_var; nobody else should call this function.  */

static inline tree
create_tmp_from_val (tree val)
{
  /* Drop all qualifiers and address-space information from the value
     type.  */
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (val));
  tree var = create_tmp_var (type, get_name (val));

  enum tree_code tc = TREE_CODE (TREE_TYPE (var));
  if (tc == COMPLEX_TYPE || tc == VECTOR_TYPE)
    DECL_GIMPLE_REG_P (var) = 1;

  return var;
}
/* Create a temporary to hold the value of VAL.  If IS_FORMAL, try to reuse
   an existing expression temporary via the per-context hash table.  */

static tree
lookup_tmp_var (tree val, bool is_formal)
{
  tree ret;

  /* If not optimizing, never really reuse a temporary.  local-alloc
     won't allocate any variable that is used in more than one basic
     block, which means it will go into memory, causing much extra
     work in reload and final and poorer code generation, outweighing
     the extra memory allocation here.  */
  if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val))
    ret = create_tmp_from_val (val);
  else
    {
      elt_t elt, *elt_p;
      elt_t **slot;

      /* Look VAL up in the formal temp table, inserting a slot on miss.  */
      elt.val = val;
      if (!gimplify_ctxp->temp_htab)
	gimplify_ctxp->temp_htab = new hash_table<gimplify_hasher> (1000);
      slot = gimplify_ctxp->temp_htab->find_slot (&elt, INSERT);
      if (*slot == NULL)
	{
	  /* First time we've seen VAL: make a temp and remember it.  */
	  elt_p = XNEW (elt_t);
	  elt_p->val = val;
	  elt_p->temp = ret = create_tmp_from_val (val);
	  *slot = elt_p;
	}
      else
	{
	  /* Reuse the temp already created for this value.  */
	  elt_p = *slot;
	  ret = elt_p->temp;
	}
    }

  return ret;
}
/* Helper for get_formal_tmp_var and get_initialized_tmp_var: gimplify
   VAL into a simple value, create (or reuse, if IS_FORMAL) a temporary
   T, emit "T = VAL" onto *PRE_P and return T.  When ALLOW_SSA and the
   context permits, T is a fresh SSA name instead of a VAR_DECL.  */

static tree
internal_get_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p,
		      bool is_formal, bool allow_ssa)
{
  tree t, mod;

  /* Notice that we explicitly allow VAL to be a CALL_EXPR so that we
     can create an INIT_EXPR and convert it into a GIMPLE_CALL below.  */
  gimplify_expr (&val, pre_p, post_p, is_gimple_reg_rhs_or_call,
		 fb_rvalue);

  if (allow_ssa
      && gimplify_ctxp->into_ssa
      && is_gimple_reg_type (TREE_TYPE (val)))
    {
      t = make_ssa_name (TYPE_MAIN_VARIANT (TREE_TYPE (val)));
      if (! gimple_in_ssa_p (cfun))
	{
	  /* Not in SSA form yet: give the name a readable identifier
	     derived from VAL for dumps.  */
	  const char *name = get_name (val);
	  if (name)
	    SET_SSA_NAME_VAR_OR_IDENTIFIER (t, create_tmp_var_name (name));
	}
    }
  else
    t = lookup_tmp_var (val, is_formal);

  mod = build2 (INIT_EXPR, TREE_TYPE (t), t, unshare_expr (val));

  SET_EXPR_LOCATION (mod, EXPR_LOC_OR_LOC (val, input_location));

  /* gimplify_modify_expr might want to reduce this further.  */
  gimplify_and_add (mod, pre_p);
  ggc_free (mod);

  return t;
}
/* Return a formal temporary variable initialized with VAL.  PRE_P is as
   in gimplify_expr.  Only use this function if:

   1) The value of the unfactored expression represented by VAL will not
      change between the initialization and use of the temporary, and
   2) The temporary will not be otherwise modified.

   For instance, #1 means that this is inappropriate for SAVE_EXPR temps,
   and #2 means it is inappropriate for && temps.

   For other cases, use get_initialized_tmp_var instead.  */

tree
get_formal_tmp_var (tree val, gimple_seq *pre_p)
{
  return internal_get_tmp_var (val, pre_p, NULL, true, true);
}
/* Return a temporary variable initialized with VAL.  PRE_P and POST_P
   are as in gimplify_expr.  Unlike get_formal_tmp_var, the temporary is
   never reused for equal values.  */

tree
get_initialized_tmp_var (tree val, gimple_seq *pre_p,
			 gimple_seq *post_p /* = NULL */,
			 bool allow_ssa /* = true */)
{
  return internal_get_tmp_var (val, pre_p, post_p, false, allow_ssa);
}
/* Declare all the variables in VARS in SCOPE.  If DEBUG_INFO is true,
   generate debug info for them; otherwise don't.  */

void
declare_vars (tree vars, gimple *gs, bool debug_info)
{
  tree last = vars;
  if (last)
    {
      tree temps, block;

      gbind *scope = as_a <gbind *> (gs);

      temps = nreverse (last);

      block = gimple_bind_block (scope);
      gcc_assert (!block || TREE_CODE (block) == BLOCK);
      if (!block || !debug_info)
	{
	  /* No BLOCK (or no debug info wanted): just chain the temps
	     onto the front of the bind's variable list.  */
	  DECL_CHAIN (last) = gimple_bind_vars (scope);
	  gimple_bind_set_vars (scope, temps);
	}
      else
	{
	  /* We need to attach the nodes both to the BIND_EXPR and to its
	     associated BLOCK for debugging purposes.  The key point here
	     is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
	     is a subchain of the BIND_EXPR_VARS of the BIND_EXPR.  */
	  if (BLOCK_VARS (block))
	    BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
	  else
	    {
	      gimple_bind_set_vars (scope,
				    chainon (gimple_bind_vars (scope), temps));
	      BLOCK_VARS (block) = temps;
	    }
	}
    }
}
/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
   for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly.  Abort if
   no such upper bound can be obtained.  */

static void
force_constant_size (tree var)
{
  /* The only attempt we make is by querying the maximum size of objects
     of the variable's type.  */

  HOST_WIDE_INT max_size;

  gcc_assert (VAR_P (var));

  max_size = max_int_size_in_bytes (TREE_TYPE (var));

  gcc_assert (max_size >= 0);

  /* Install the bound both in bytes and in bits.  */
  DECL_SIZE_UNIT (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
  DECL_SIZE (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}
/* Push the temporary variable TMP into the local decls of function FN
   (rather than the current gimplify context).  */

void
gimple_add_tmp_var_fn (struct function *fn, tree tmp)
{
  /* TMP must be fresh: not yet chained anywhere nor seen in a bind.  */
  gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));

  /* Later processing assumes that the object size is constant, which might
     not be true at this point.  Force the use of a constant upper bound in
     this case.  */
  if (!tree_fits_poly_uint64_p (DECL_SIZE_UNIT (tmp)))
    force_constant_size (tmp);

  DECL_CONTEXT (tmp) = fn->decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;

  record_vars_into (tmp, fn->decl);
}
/* Push the temporary variable TMP into the current binding.  Also
   registers TMP with the innermost enclosing OMP construct when one is
   being gimplified.  */

void
gimple_add_tmp_var (tree tmp)
{
  /* TMP must be fresh: not yet chained anywhere nor seen in a bind.  */
  gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));

  /* Later processing assumes that the object size is constant, which might
     not be true at this point.  Force the use of a constant upper bound in
     this case.  */
  if (!tree_fits_poly_uint64_p (DECL_SIZE_UNIT (tmp)))
    force_constant_size (tmp);

  DECL_CONTEXT (tmp) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;

  if (gimplify_ctxp)
    {
      /* Chain TMP onto the current context's temporaries.  */
      DECL_CHAIN (tmp) = gimplify_ctxp->temps;
      gimplify_ctxp->temps = tmp;

      /* Mark temporaries local within the nearest enclosing parallel.  */
      if (gimplify_omp_ctxp)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  int flag = GOVD_LOCAL | GOVD_SEEN;
	  /* Skip outward past worksharing-ish regions to the nearest
	     context that owns data-sharing for TMP.  */
	  while (ctx
		 && (ctx->region_type == ORT_WORKSHARE
		     || ctx->region_type == ORT_TASKGROUP
		     || ctx->region_type == ORT_SIMD
		     || ctx->region_type == ORT_ACC))
	    {
	      if (ctx->region_type == ORT_SIMD
		  && TREE_ADDRESSABLE (tmp)
		  && !TREE_STATIC (tmp))
		{
		  /* Addressable automatic temp inside a SIMD region:
		     either force safelen(1) (non-constant size) or
		     privatize it in this context.  */
		  if (TREE_CODE (DECL_SIZE_UNIT (tmp)) != INTEGER_CST)
		    ctx->add_safelen1 = true;
		  else if (ctx->in_for_exprs)
		    flag = GOVD_PRIVATE;
		  else
		    flag = GOVD_PRIVATE | GOVD_SEEN;
		  break;
		}
	      ctx = ctx->outer_context;
	    }
	  if (ctx)
	    omp_add_variable (ctx, tmp, flag);
	}
    }
  else if (cfun)
    record_vars (tmp);
  else
    {
      gimple_seq body_seq;

      /* This case is for nested functions.  We need to expose the locals
	 they create.  */
      body_seq = gimple_body (current_function_decl);
      declare_vars (tmp, gimple_seq_first_stmt (body_seq), false);
    }
}
/* This page contains routines to unshare tree nodes, i.e. to duplicate tree
nodes that are referenced more than once in GENERIC functions. This is
necessary because gimplification (translation into GIMPLE) is performed
by modifying tree nodes in-place, so gimplication of a shared node in a
first context could generate an invalid GIMPLE form in a second context.
This is achieved with a simple mark/copy/unmark algorithm that walks the
GENERIC representation top-down, marks nodes with TREE_VISITED the first
time it encounters them, duplicates them if they already have TREE_VISITED
set, and finally removes the TREE_VISITED marks it has set.
The algorithm works only at the function level, i.e. it generates a GENERIC
representation of a function with no nodes shared within the function when
passed a GENERIC function (except for nodes that are allowed to be shared).
At the global level, it is also necessary to unshare tree nodes that are
referenced in more than one function, for the same aforementioned reason.
This requires some cooperation from the front-end. There are 2 strategies:
1. Manual unsharing. The front-end needs to call unshare_expr on every
expression that might end up being shared across functions.
2. Deep unsharing. This is an extension of regular unsharing. Instead
of calling unshare_expr on expressions that might be shared across
functions, the front-end pre-marks them with TREE_VISITED. This will
ensure that they are unshared on the first reference within functions
when the regular unsharing algorithm runs. The counterpart is that
this algorithm must look deeper than for manual unsharing, which is
specified by LANG_HOOKS_DEEP_UNSHARING.
If there are only few specific cases of node sharing across functions, it is
probably easier for a front-end to unshare the expressions manually. On the
contrary, if the expressions generated at the global level are as widespread
as expressions generated within functions, deep unsharing is very likely the
way to go. */
/* Similar to copy_tree_r but do not copy SAVE_EXPR or TARGET_EXPR nodes.
   These nodes model computations that must be done once.  If we were to
   unshare something like SAVE_EXPR(i++), the gimplification process would
   create wrong code.  However, if DATA is non-null, it must hold a pointer
   set that is used to unshare the subtrees of these nodes.  */

static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  /* Do not copy SAVE_EXPR, TARGET_EXPR or BIND_EXPR nodes themselves, but
     copy their subtrees if we can make sure to do it only once.  */
  if (code == SAVE_EXPR || code == TARGET_EXPR || code == BIND_EXPR)
    {
      if (data && !((hash_set<tree> *)data)->add (t))
	/* First visit of T under deep unsharing: descend once.  */
	;
      else
	*walk_subtrees = 0;
    }

  /* Stop at types, decls, constants like copy_tree_r.  */
  else if (TREE_CODE_CLASS (code) == tcc_type
	   || TREE_CODE_CLASS (code) == tcc_declaration
	   || TREE_CODE_CLASS (code) == tcc_constant)
    *walk_subtrees = 0;

  /* Cope with the statement expression extension.  */
  else if (code == STATEMENT_LIST)
    ;

  /* Leave the bulk of the work to copy_tree_r itself.  */
  else
    copy_tree_r (tp, walk_subtrees, NULL);

  return NULL_TREE;
}
/* Callback for walk_tree to unshare most of the shared trees rooted at *TP.
   If *TP has been visited already, then *TP is deeply copied by calling
   mostly_copy_tree_r.  DATA is passed to mostly_copy_tree_r unmodified.  */

static tree
copy_if_shared_r (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  /* Skip types, decls, and constants.  But we do want to look at their
     types and the bounds of types.  Mark them as visited so we properly
     unmark their subtrees on the unmark pass.  If we've already seen them,
     don't look down further.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      if (TREE_VISITED (t))
	*walk_subtrees = 0;
      else
	TREE_VISITED (t) = 1;
    }

  /* If this node has been visited already, unshare it and don't look
     any deeper.  */
  else if (TREE_VISITED (t))
    {
      walk_tree (tp, mostly_copy_tree_r, data, NULL);
      *walk_subtrees = 0;
    }

  /* Otherwise, mark the node as visited and keep looking.  */
  else
    TREE_VISITED (t) = 1;

  return NULL_TREE;
}
/* Unshare most of the shared trees rooted at *TP.  DATA is passed to the
   copy_if_shared_r callback unmodified.  Leaves TREE_VISITED set on the
   walked nodes; callers pair this with unmark_visited/unvisit_body.  */
void
copy_if_shared (tree *tp, void *data)
{
  walk_tree (tp, copy_if_shared_r, data, NULL);
}
/* Unshare all the trees in the body of FNDECL, as well as in the bodies of
   any nested functions.  Also unshares the size expressions of the result
   decl, since those can contain shared SAVE_EXPRs as well.  */
static void
unshare_body (tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node::get (fndecl);
  /* If the language requires deep unsharing, we need a pointer set to make
     sure we don't repeatedly unshare subtrees of unshareable nodes.  */
  hash_set<tree> *visited
    = lang_hooks.deep_unsharing ? new hash_set<tree> : NULL;

  copy_if_shared (&DECL_SAVED_TREE (fndecl), visited);
  copy_if_shared (&DECL_SIZE (DECL_RESULT (fndecl)), visited);
  copy_if_shared (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)), visited);

  delete visited;

  /* Recurse into nested functions recorded in the call graph.  */
  if (cgn)
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unshare_body (cgn->decl);
}
/* Callback for walk_tree to unmark the visited trees rooted at *TP.
   Subtrees are walked until the first unvisited node is encountered,
   since an unvisited node cannot have marked children.  */
static tree
unmark_visited_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  tree node = *tp;

  /* An unvisited node terminates the walk along this branch.  */
  if (!TREE_VISITED (node))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  TREE_VISITED (node) = 0;
  return NULL_TREE;
}
/* Unmark the visited trees rooted at *TP, undoing the TREE_VISITED marks
   left behind by copy_if_shared.  */
static inline void
unmark_visited (tree *tp)
{
  walk_tree (tp, unmark_visited_r, NULL, NULL);
}
/* Likewise, but mark all trees as not visited.  Mirrors unshare_body:
   clears the marks on the saved tree, the result-decl size expressions,
   and recurses into nested functions.  */
static void
unvisit_body (tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node::get (fndecl);

  unmark_visited (&DECL_SAVED_TREE (fndecl));
  unmark_visited (&DECL_SIZE (DECL_RESULT (fndecl)));
  unmark_visited (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)));

  if (cgn)
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unvisit_body (cgn->decl);
}
/* Unconditionally make an unshared copy of EXPR.  This is used when using
   stored expressions which span multiple functions, such as BINFO_VTABLE,
   as the normal unsharing process can't tell that they're shared.
   Returns the (possibly replaced) copy.  */
tree
unshare_expr (tree expr)
{
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  return expr;
}
/* Worker for unshare_expr_without_location: erase the source location of
   every expression node; non-expressions end the walk on this branch.  */
static tree
prune_expr_location (tree *tp, int *walk_subtrees, void *)
{
  tree node = *tp;

  if (!EXPR_P (node))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  SET_EXPR_LOCATION (node, UNKNOWN_LOCATION);
  return NULL_TREE;
}
/* Similar to unshare_expr but also prune all expression locations
   from EXPR.  The location walk is only done when the root is itself
   an expression, since prune_expr_location stops at non-expressions.  */
tree
unshare_expr_without_location (tree expr)
{
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  if (EXPR_P (expr))
    walk_tree (&expr, prune_expr_location, NULL, NULL);
  return expr;
}
/* Return the EXPR_LOCATION of EXPR, if it (maybe recursively) has
   one, OR_ELSE otherwise.  The location of a STATEMENT_LISTs
   comprising at least one DEBUG_BEGIN_STMT followed by exactly one
   EXPR is the location of the EXPR.  */
static location_t
rexpr_location (tree expr, location_t or_else = UNKNOWN_LOCATION)
{
  if (!expr)
    return or_else;

  if (EXPR_HAS_LOCATION (expr))
    return EXPR_LOCATION (expr);

  if (TREE_CODE (expr) != STATEMENT_LIST)
    return or_else;

  /* Skip the leading DEBUG_BEGIN_STMTs; FOUND records that at least one
     was present, which is required for the special-case below.  */
  tree_stmt_iterator i = tsi_start (expr);

  bool found = false;
  while (!tsi_end_p (i) && TREE_CODE (tsi_stmt (i)) == DEBUG_BEGIN_STMT)
    {
      found = true;
      tsi_next (&i);
    }

  /* Only a list of DEBUG_BEGIN_STMTs followed by exactly one statement
     qualifies; recurse into that single trailing statement.  */
  if (!found || !tsi_one_before_end_p (i))
    return or_else;

  return rexpr_location (tsi_stmt (i), or_else);
}
/* Return TRUE iff EXPR (maybe recursively) has a location; see
   rexpr_location for the potential recursion.  */
static inline bool
rexpr_has_location (tree expr)
{
  location_t loc = rexpr_location (expr);
  return loc != UNKNOWN_LOCATION;
}
/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Return the temporary, or NULL_TREE if
   WRAPPER was already void.

   If TEMP is non-null it must be an INIT_EXPR/MODIFY_EXPR whose RHS the
   wrapper is; the assignment is pushed down onto the wrapper's value.
   Otherwise a fresh "retval" temporary is created.  */
tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;

      /* Set p to point to the body of the wrapper.  Loop until we find
	 something that isn't a wrapper; every wrapper on the way down is
	 voidified in place.  */
      for (p = &wrapper; p && *p; )
	{
	  switch (TREE_CODE (*p))
	    {
	    case BIND_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      /* For a BIND_EXPR, the body is operand 1.  */
	      p = &BIND_EXPR_BODY (*p);
	      break;

	    case CLEANUP_POINT_EXPR:
	    case TRY_FINALLY_EXPR:
	    case TRY_CATCH_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TREE_OPERAND (*p, 0);
	      break;

	    case STATEMENT_LIST:
	      {
		tree_stmt_iterator i = tsi_last (*p);
		TREE_SIDE_EFFECTS (*p) = 1;
		TREE_TYPE (*p) = void_type_node;
		/* An empty statement list means there is no value; P
		   becomes NULL and TEMP is dropped below.  */
		p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
	      }
	      break;

	    case COMPOUND_EXPR:
	      /* Advance to the last statement.  Set all container types to
		 void.  */
	      for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1))
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		}
	      break;

	    case TRANSACTION_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TRANSACTION_EXPR_BODY (*p);
	      break;

	    default:
	      /* Assume that any tree upon which voidify_wrapper_expr is
		 directly called is a wrapper, and that its body is op0.  */
	      if (p == &wrapper)
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		  p = &TREE_OPERAND (*p, 0);
		  break;
		}
	      goto out;
	    }
	}

    out:
      if (p == NULL || IS_EMPTY_STMT (*p))
	temp = NULL_TREE;
      else if (temp)
	{
	  /* The wrapper is on the RHS of an assignment that we're pushing
	     down.  */
	  gcc_assert (TREE_CODE (temp) == INIT_EXPR
		      || TREE_CODE (temp) == MODIFY_EXPR);
	  TREE_OPERAND (temp, 1) = *p;
	  *p = temp;
	}
      else
	{
	  temp = create_tmp_var (type, "retval");
	  *p = build2 (INIT_EXPR, type, temp, *p);
	}

      return temp;
    }

  return NULL_TREE;
}
/* Prepare calls to builtins to SAVE and RESTORE the stack as well as
   a temporary through which they communicate.  On return *SAVE is a call
   to __builtin_stack_save whose LHS is the temporary, and *RESTORE is a
   call to __builtin_stack_restore taking that temporary.  */
static void
build_stack_save_restore (gcall **save, gcall **restore)
{
  tree tmp_var;

  *save = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_SAVE), 0);
  tmp_var = create_tmp_var (ptr_type_node, "saved_stack");
  gimple_call_set_lhs (*save, tmp_var);

  *restore
    = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_RESTORE),
			 1, tmp_var);
}
/* Generate IFN_ASAN_MARK call that poisons shadow of a for DECL variable.
   Returns the call expression, or NULL_TREE for zero-sized variables
   (which need no shadow poisoning).  */
static tree
build_asan_poison_call_expr (tree decl)
{
  /* Do not poison variables that have size equal to zero.  */
  tree unit_size = DECL_SIZE_UNIT (decl);
  if (zerop (unit_size))
    return NULL_TREE;

  tree base = build_fold_addr_expr (decl);

  return build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_ASAN_MARK,
				       void_type_node, 3,
				       build_int_cst (integer_type_node,
						      ASAN_MARK_POISON),
				       base, unit_size);
}
/* Generate IFN_ASAN_MARK call that would poison or unpoison, depending
   on POISON flag, shadow memory of a DECL variable.  The call will be
   put on location identified by IT iterator, where BEFORE flag drives
   position where the stmt will be put.  */
static void
asan_poison_variable (tree decl, bool poison, gimple_stmt_iterator *it,
		      bool before)
{
  tree unit_size = DECL_SIZE_UNIT (decl);
  tree base = build_fold_addr_expr (decl);

  /* Do not poison variables that have size equal to zero.  */
  if (zerop (unit_size))
    return;

  /* It's necessary to have all stack variables aligned to ASAN granularity
     bytes.  */
  if (DECL_ALIGN_UNIT (decl) <= ASAN_SHADOW_GRANULARITY)
    SET_DECL_ALIGN (decl, BITS_PER_UNIT * ASAN_SHADOW_GRANULARITY);

  HOST_WIDE_INT flags = poison ? ASAN_MARK_POISON : ASAN_MARK_UNPOISON;

  gimple *g
    = gimple_build_call_internal (IFN_ASAN_MARK, 3,
				  build_int_cst (integer_type_node, flags),
				  base, unit_size);

  if (before)
    gsi_insert_before (it, g, GSI_NEW_STMT);
  else
    gsi_insert_after (it, g, GSI_NEW_STMT);
}
/* Generate IFN_ASAN_MARK internal call that depending on POISON flag
   either poisons or unpoisons a DECL.  Created statement is appended
   to SEQ_P gimple sequence.  */
static void
asan_poison_variable (tree decl, bool poison, gimple_seq *seq_p)
{
  gimple_stmt_iterator it = gsi_last (*seq_p);

  /* On an empty sequence the iterator is at its end, so insert "before"
     it; otherwise insert after the last statement.  */
  asan_poison_variable (decl, poison, &it, gsi_end_p (it));
}
/* Sort pair of VAR_DECLs A and B by DECL_UID. */
static int
sort_by_decl_uid (const void *a, const void *b)
{
const tree *t1 = (const tree *)a;
const tree *t2 = (const tree *)b;
int uid1 = DECL_UID (*t1);
int uid2 = DECL_UID (*t2);
if (uid1 < uid2)
return -1;
else if (uid1 > uid2)
return 1;
else
return 0;
}
/* Generate IFN_ASAN_MARK internal call for all VARIABLES
   depending on POISON flag.  Created statement is appended
   to SEQ_P gimple sequence.  The variables are processed in DECL_UID
   order so the generated sequence is deterministic regardless of
   hash-table iteration order.  */
static void
asan_poison_variables (hash_set<tree> *variables, bool poison, gimple_seq *seq_p)
{
  unsigned c = variables->elements ();
  if (c == 0)
    return;

  auto_vec<tree> sorted_variables (c);

  for (hash_set<tree>::iterator it = variables->begin ();
       it != variables->end (); ++it)
    sorted_variables.safe_push (*it);

  sorted_variables.qsort (sort_by_decl_uid);

  unsigned i;
  tree var;
  FOR_EACH_VEC_ELT (sorted_variables, i, var)
    {
      asan_poison_variable (var, poison, seq_p);

      /* Add use_after_scope_memory attribute for the variable in order
	 to prevent re-written into SSA.  */
      if (!lookup_attribute (ASAN_USE_AFTER_SCOPE_ATTRIBUTE,
			     DECL_ATTRIBUTES (var)))
	DECL_ATTRIBUTES (var)
	  = tree_cons (get_identifier (ASAN_USE_AFTER_SCOPE_ATTRIBUTE),
		       integer_one_node,
		       DECL_ATTRIBUTES (var));
    }
}
/* Gimplify a BIND_EXPR.  Just voidify and recurse.  Builds a GIMPLE_BIND
   tuple for the expression's scope, registers its variables with the
   enclosing OpenMP context (if any), adds stack save/restore around VLA
   scopes, and emits end-of-scope clobbers and ASan poison calls.  */
static enum gimplify_status
gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree bind_expr = *expr_p;
  /* Saved so that inner scopes can temporarily reset these flags; they
     are restored (keep_stack: propagated) before returning.  */
  bool old_keep_stack = gimplify_ctxp->keep_stack;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;
  gbind *bind_stmt;
  gimple_seq body, cleanup;
  gcall *stack_save;
  location_t start_locus = 0, end_locus = 0;
  tree ret_clauses = NULL;

  tree temp = voidify_wrapper_expr (bind_expr, NULL);

  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
    {
      if (VAR_P (t))
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;

	  /* Mark variable as local.  */
	  if (ctx && ctx->region_type != ORT_NONE && !DECL_EXTERNAL (t))
	    {
	      if (! DECL_SEEN_IN_BIND_EXPR_P (t)
		  || splay_tree_lookup (ctx->variables,
					(splay_tree_key) t) == NULL)
		{
		  int flag = GOVD_LOCAL;
		  if (ctx->region_type == ORT_SIMD
		      && TREE_ADDRESSABLE (t)
		      && !TREE_STATIC (t))
		    {
		      /* Addressable locals of unknown size inside a simd
			 region force safelen(1); fixed-size ones can be
			 privatized instead.  */
		      if (TREE_CODE (DECL_SIZE_UNIT (t)) != INTEGER_CST)
			ctx->add_safelen1 = true;
		      else
			flag = GOVD_PRIVATE;
		    }
		  omp_add_variable (ctx, t, flag | GOVD_SEEN);
		}
	      /* Static locals inside of target construct or offloaded
		 routines need to be "omp declare target".  */
	      if (TREE_STATIC (t))
		for (; ctx; ctx = ctx->outer_context)
		  if ((ctx->region_type & ORT_TARGET) != 0)
		    {
		      if (!lookup_attribute ("omp declare target",
					     DECL_ATTRIBUTES (t)))
			{
			  tree id = get_identifier ("omp declare target");
			  DECL_ATTRIBUTES (t)
			    = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (t));
			  varpool_node *node = varpool_node::get (t);
			  if (node)
			    {
			      node->offloadable = 1;
			      if (ENABLE_OFFLOADING && !DECL_EXTERNAL (t))
				{
				  g->have_offload = true;
				  if (!in_lto_p)
				    vec_safe_push (offload_vars, t);
				}
			    }
			}
		      break;
		    }
	    }

	  DECL_SEEN_IN_BIND_EXPR_P (t) = 1;

	  if (DECL_HARD_REGISTER (t) && !is_global_var (t) && cfun)
	    cfun->has_local_explicit_reg_vars = true;
	}

      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if ((TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
	   || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
	  && !TREE_THIS_VOLATILE (t)
	  && (VAR_P (t) && !DECL_HARD_REGISTER (t))
	  && !needs_to_live_in_memory (t))
	DECL_GIMPLE_REG_P (t) = 1;
    }

  bind_stmt = gimple_build_bind (BIND_EXPR_VARS (bind_expr), NULL,
				 BIND_EXPR_BLOCK (bind_expr));
  gimple_push_bind_expr (bind_stmt);

  /* Reset so we can detect alloca/VLA use within this scope only.  */
  gimplify_ctxp->keep_stack = false;
  gimplify_ctxp->save_stack = false;

  /* Gimplify the body into the GIMPLE_BIND tuple's body.  */
  body = NULL;
  gimplify_stmt (&BIND_EXPR_BODY (bind_expr), &body);
  gimple_bind_set_body (bind_stmt, body);

  /* Source location wise, the cleanup code (stack_restore and clobbers)
     belongs to the end of the block, so propagate what we have.  The
     stack_save operation belongs to the beginning of block, which we can
     infer from the bind_expr directly if the block has no explicit
     assignment.  */
  if (BIND_EXPR_BLOCK (bind_expr))
    {
      end_locus = BLOCK_SOURCE_END_LOCATION (BIND_EXPR_BLOCK (bind_expr));
      start_locus = BLOCK_SOURCE_LOCATION (BIND_EXPR_BLOCK (bind_expr));
    }
  if (start_locus == 0)
    start_locus = EXPR_LOCATION (bind_expr);

  cleanup = NULL;
  stack_save = NULL;

  /* If the code both contains VLAs and calls alloca, then we cannot reclaim
     the stack space allocated to the VLAs.  */
  if (gimplify_ctxp->save_stack && !gimplify_ctxp->keep_stack)
    {
      gcall *stack_restore;

      /* Save stack on entry and restore it on exit.  Add a try_finally
	 block to achieve this.  */
      build_stack_save_restore (&stack_save, &stack_restore);

      gimple_set_location (stack_save, start_locus);
      gimple_set_location (stack_restore, end_locus);

      gimplify_seq_add_stmt (&cleanup, stack_restore);
    }

  /* Add clobbers for all variables that go out of scope.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
    {
      if (VAR_P (t)
	  && !is_global_var (t)
	  && DECL_CONTEXT (t) == current_function_decl)
	{
	  if (!DECL_HARD_REGISTER (t)
	      && !TREE_THIS_VOLATILE (t)
	      && !DECL_HAS_VALUE_EXPR_P (t)
	      /* Only care for variables that have to be in memory.  Others
		 will be rewritten into SSA names, hence moved to the
		 top-level.  */
	      && !is_gimple_reg (t)
	      && flag_stack_reuse != SR_NONE)
	    {
	      tree clobber = build_clobber (TREE_TYPE (t));
	      gimple *clobber_stmt;
	      clobber_stmt = gimple_build_assign (t, clobber);
	      gimple_set_location (clobber_stmt, end_locus);
	      gimplify_seq_add_stmt (&cleanup, clobber_stmt);
	    }

	  /* Collect OpenACC "declare" return clauses recorded for T.  */
	  if (flag_openacc && oacc_declare_returns != NULL)
	    {
	      tree *c = oacc_declare_returns->get (t);
	      if (c != NULL)
		{
		  if (ret_clauses)
		    OMP_CLAUSE_CHAIN (*c) = ret_clauses;

		  ret_clauses = *c;

		  oacc_declare_returns->remove (t);

		  if (oacc_declare_returns->is_empty ())
		    {
		      delete oacc_declare_returns;
		      oacc_declare_returns = NULL;
		    }
		}
	    }
	}

      /* Re-poison variables that were unpoisoned for this scope.  */
      if (asan_poisoned_variables != NULL
	  && asan_poisoned_variables->contains (t))
	{
	  asan_poisoned_variables->remove (t);
	  asan_poison_variable (t, true, &cleanup);
	}

      if (gimplify_ctxp->live_switch_vars != NULL
	  && gimplify_ctxp->live_switch_vars->contains (t))
	gimplify_ctxp->live_switch_vars->remove (t);
    }

  if (ret_clauses)
    {
      gomp_target *stmt;
      gimple_stmt_iterator si = gsi_start (cleanup);

      stmt = gimple_build_omp_target (NULL, GF_OMP_TARGET_KIND_OACC_DECLARE,
				      ret_clauses);
      gsi_insert_seq_before_without_update (&si, stmt, GSI_NEW_STMT);
    }

  if (cleanup)
    {
      gtry *gs;
      gimple_seq new_body;

      new_body = NULL;
      gs = gimple_build_try (gimple_bind_body (bind_stmt), cleanup,
			     GIMPLE_TRY_FINALLY);

      if (stack_save)
	gimplify_seq_add_stmt (&new_body, stack_save);
      gimplify_seq_add_stmt (&new_body, gs);
      gimple_bind_set_body (bind_stmt, new_body);
    }

  /* keep_stack propagates all the way up to the outermost BIND_EXPR.  */
  if (!gimplify_ctxp->keep_stack)
    gimplify_ctxp->keep_stack = old_keep_stack;
  gimplify_ctxp->save_stack = old_save_stack;

  gimple_pop_bind_expr ();

  gimplify_seq_add_stmt (pre_p, bind_stmt);

  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}
/* Maybe add early return predict statement to PRE_P sequence.  A return
   inside a conditional is predicted NOT_TAKEN (PRED_TREE_EARLY_RETURN);
   an unconditional return needs no prediction.  */
static void
maybe_add_early_return_predict_stmt (gimple_seq *pre_p)
{
  /* If we are not in a conditional context, add PREDICT statement.  */
  if (gimple_conditional_context ())
    {
      gimple *predict = gimple_build_predict (PRED_TREE_EARLY_RETURN,
					      NOT_TAKEN);
      gimplify_seq_add_stmt (pre_p, predict);
    }
}
/* Gimplify a RETURN_EXPR.  If the expression to be returned is not a
   GIMPLE value, it is assigned to a new temporary and the statement is
   re-written to return the temporary.

   PRE_P points to the sequence where side effects that must happen before
   STMT should be stored.  */
static enum gimplify_status
gimplify_return_expr (tree stmt, gimple_seq *pre_p)
{
  greturn *ret;
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;

  if (ret_expr == error_mark_node)
    return GS_ERROR;

  if (!ret_expr
      || TREE_CODE (ret_expr) == RESULT_DECL)
    {
      maybe_add_early_return_predict_stmt (pre_p);
      /* Reuse the function-scope RET rather than shadowing it with a
	 second local declaration (avoids a -Wshadow warning).  */
      ret = gimple_build_return (ret_expr);
      gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
      gimplify_seq_add_stmt (pre_p, ret);
      return GS_ALL_DONE;
    }

  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else if (TREE_CODE (ret_expr) == COMPOUND_EXPR)
    {
      /* Used in C++ for handling EH cleanup of the return value if a local
	 cleanup throws.  Assume the front-end knows what it's doing.  */
      result_decl = DECL_RESULT (current_function_decl);
      /* But crash if we end up trying to modify ret_expr below.  */
      ret_expr = NULL_TREE;
    }
  else
    {
      result_decl = TREE_OPERAND (ret_expr, 0);

      /* See through a return by reference.  */
      if (TREE_CODE (result_decl) == INDIRECT_REF)
	result_decl = TREE_OPERAND (result_decl, 0);

      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
		   || TREE_CODE (ret_expr) == INIT_EXPR)
		  && TREE_CODE (result_decl) == RESULT_DECL);
    }

  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in expand_return
     to handle this case that does not exist in expand_expr.  */
  if (!result_decl)
    result = NULL_TREE;
  else if (aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    {
      if (!poly_int_tree_p (DECL_SIZE (result_decl)))
	{
	  if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (result_decl)))
	    gimplify_type_sizes (TREE_TYPE (result_decl), pre_p);

	  /* Note that we don't use gimplify_vla_decl because the RESULT_DECL
	     should be effectively allocated by the caller, i.e. all calls to
	     this function must be subject to the Return Slot Optimization.  */
	  gimplify_one_sizepos (&DECL_SIZE (result_decl), pre_p);
	  gimplify_one_sizepos (&DECL_SIZE_UNIT (result_decl), pre_p);
	}
      result = result_decl;
    }
  else if (gimplify_ctxp->return_temp)
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_reg (TREE_TYPE (result_decl));

      /* ??? With complex control flow (usually involving abnormal edges),
	 we can wind up warning about an uninitialized value for this.  Due
	 to how this variable is constructed and initialized, this is never
	 true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;

      gimplify_ctxp->return_temp = result;
    }

  /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    TREE_OPERAND (ret_expr, 0) = result;

  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);

  maybe_add_early_return_predict_stmt (pre_p);
  ret = gimple_build_return (result);
  gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
  gimplify_seq_add_stmt (pre_p, ret);

  return GS_ALL_DONE;
}
/* Gimplify a variable-length array DECL.  */
static void
gimplify_vla_decl (tree decl, gimple_seq *seq_p)
{
  /* This is a variable-sized decl.  Simplify its size and mark it
     for deferred expansion.  */
  tree t, addr, ptr_type;

  gimplify_one_sizepos (&DECL_SIZE (decl), seq_p);
  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), seq_p);

  /* Don't mess with a DECL_VALUE_EXPR set by the front-end.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    return;

  /* All occurrences of this decl in final gimplified code will be
     replaced by indirection.  Setting DECL_VALUE_EXPR does two
     things: First, it lets the rest of the gimplifier know what
     replacement to use.  Second, it lets the debug info know
     where to find the value.  */
  ptr_type = build_pointer_type (TREE_TYPE (decl));
  addr = create_tmp_var (ptr_type, get_name (decl));
  DECL_IGNORED_P (addr) = 0;
  t = build_fold_indirect_ref (addr);
  TREE_THIS_NOTRAP (t) = 1;
  SET_DECL_VALUE_EXPR (decl, t);
  DECL_HAS_VALUE_EXPR_P (decl) = 1;

  t = build_alloca_call_expr (DECL_SIZE_UNIT (decl), DECL_ALIGN (decl),
			      max_int_size_in_bytes (TREE_TYPE (decl)));
  /* The call has been built for a variable-sized object.  */
  CALL_ALLOCA_FOR_VAR_P (t) = 1;
  t = fold_convert (ptr_type, t);
  t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);

  gimplify_and_add (t, seq_p);

  /* Record the dynamic allocation associated with DECL if requested.  */
  if (flag_callgraph_info & CALLGRAPH_INFO_DYNAMIC_ALLOC)
    record_dynamic_alloc (decl);
}
/* walk_tree callback: mark every LABEL_DECL under *TP as forced, so that
   labels whose addresses may be stored in static initializers are never
   removed.  To be called for DECL_INITIAL of static variables.  */
static tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  tree node = *tp;

  /* Types cannot contain label references; prune the walk there.  */
  if (TYPE_P (node))
    *walk_subtrees = 0;

  if (TREE_CODE (node) == LABEL_DECL)
    {
      FORCED_LABEL (node) = 1;
      cfun->has_forced_label_in_static = 1;
    }

  return NULL_TREE;
}
/* Gimplify a DECL_EXPR node *STMT_P by making any necessary allocation
   and initialization explicit.  */
static enum gimplify_status
gimplify_decl_expr (tree *stmt_p, gimple_seq *seq_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);

  *stmt_p = NULL_TREE;

  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;

  if ((TREE_CODE (decl) == TYPE_DECL
       || VAR_P (decl))
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    {
      gimplify_type_sizes (TREE_TYPE (decl), seq_p);
      if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
	gimplify_type_sizes (TREE_TYPE (TREE_TYPE (decl)), seq_p);
    }

  /* ??? DECL_ORIGINAL_TYPE is streamed for LTO so it needs to be gimplified
     in case its size expressions contain problematic nodes like CALL_EXPR.  */
  if (TREE_CODE (decl) == TYPE_DECL
      && DECL_ORIGINAL_TYPE (decl)
      && !TYPE_SIZES_GIMPLIFIED (DECL_ORIGINAL_TYPE (decl)))
    {
      gimplify_type_sizes (DECL_ORIGINAL_TYPE (decl), seq_p);
      if (TREE_CODE (DECL_ORIGINAL_TYPE (decl)) == REFERENCE_TYPE)
	gimplify_type_sizes (TREE_TYPE (DECL_ORIGINAL_TYPE (decl)), seq_p);
    }

  if (VAR_P (decl) && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);
      bool is_vla = false;
      poly_uint64 size;

      /* A decl of non-constant size, or one that is too large under
	 -fstack-check=generic, gets the VLA (alloca) treatment.  */
      if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &size)
	  || (!TREE_STATIC (decl)
	      && flag_stack_check == GENERIC_STACK_CHECK
	      && maybe_gt (size,
			   (unsigned HOST_WIDE_INT) STACK_CHECK_MAX_VAR_SIZE)))
	{
	  gimplify_vla_decl (decl, seq_p);
	  is_vla = true;
	}

      /* Unpoison the variable's shadow memory at the point of declaration
	 for -fsanitize-address-use-after-scope tracking.  */
      if (asan_poisoned_variables
	  && !is_vla
	  && TREE_ADDRESSABLE (decl)
	  && !TREE_STATIC (decl)
	  && !DECL_HAS_VALUE_EXPR_P (decl)
	  && DECL_ALIGN (decl) <= MAX_SUPPORTED_STACK_ALIGNMENT
	  && dbg_cnt (asan_use_after_scope)
	  && !gimplify_omp_ctxp)
	{
	  asan_poisoned_variables->add (decl);
	  asan_poison_variable (decl, false, seq_p);
	  if (!DECL_ARTIFICIAL (decl) && gimplify_ctxp->live_switch_vars)
	    gimplify_ctxp->live_switch_vars->add (decl);
	}

      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);

      if (init && init != error_mark_node)
	{
	  if (!TREE_STATIC (decl))
	    {
	      DECL_INITIAL (decl) = NULL_TREE;
	      init = build2 (INIT_EXPR, void_type_node, decl, init);
	      gimplify_and_add (init, seq_p);
	      ggc_free (init);
	    }
	  else
	    /* We must still examine initializers for static variables
	       as they may contain a label address.  */
	    walk_tree (&init, force_labels_r, NULL, NULL);
	}
    }

  return GS_ALL_DONE;
}
/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  */
static enum gimplify_status
gimplify_loop_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree saved_label = gimplify_ctxp->exit_label;
  tree start_label = create_artificial_label (UNKNOWN_LOCATION);

  gimplify_seq_add_stmt (pre_p, gimple_build_label (start_label));

  /* Cleared here; gimplifying an EXIT_EXPR in the body sets it.  */
  gimplify_ctxp->exit_label = NULL_TREE;

  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);

  gimplify_seq_add_stmt (pre_p, gimple_build_goto (start_label));

  if (gimplify_ctxp->exit_label)
    gimplify_seq_add_stmt (pre_p,
			   gimple_build_label (gimplify_ctxp->exit_label));

  gimplify_ctxp->exit_label = saved_label;

  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* Gimplify a statement list onto a sequence.  These may be created either
   by an enlightened front-end, or by shortcut_cond_expr.  Each statement is
   gimplified and then unlinked from the list.  */
static enum gimplify_status
gimplify_statement_list (tree *expr_p, gimple_seq *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  tree_stmt_iterator i = tsi_start (*expr_p);

  while (!tsi_end_p (i))
    {
      gimplify_stmt (tsi_stmt_ptr (i), pre_p);
      tsi_delink (&i);
    }

  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  return GS_ALL_DONE;
}
/* Callback for walk_gimple_seq.  Finds the first "real" statement in a
   switch body: stores it in WI->info and returns non-NULL to stop the
   walk, or keeps walking through scope-like containers.  */
static tree
warn_switch_unreachable_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
			   struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_TRY:
      /* A compiler-generated cleanup or a user-written try block.
	 If it's empty, don't dive into it--that would result in
	 worse location info.  */
      if (gimple_try_eval (stmt) == NULL)
	{
	  wi->info = stmt;
	  return integer_zero_node;
	}
      /* Fall through.  */
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRANSACTION:
      /* Walk the sub-statements.  */
      *handled_ops_p = false;
      break;

    case GIMPLE_DEBUG:
      /* Ignore these.  We may generate them before declarations that
	 are never executed.  If there's something to warn about,
	 there will be non-debug stmts too, and we'll catch those.  */
      break;

    case GIMPLE_CALL:
      /* ASan markers are instrumentation, not user code; look inside.  */
      if (gimple_call_internal_p (stmt, IFN_ASAN_MARK))
	{
	  *handled_ops_p = false;
	  break;
	}
      /* Fall through.  */
    default:
      /* Save the first "real" statement (not a decl/lexical scope/...).  */
      wi->info = stmt;
      return integer_zero_node;
    }

  return NULL_TREE;
}
/* Possibly warn about unreachable statements between switch's controlling
   expression and the first case.  SEQ is the body of a switch expression.
   Emits -Wswitch-unreachable when the first real statement found is not
   a label (and not a compiler-generated goto).  */
static void
maybe_warn_switch_unreachable (gimple_seq seq)
{
  if (!warn_switch_unreachable
      /* This warning doesn't play well with Fortran when optimizations
	 are on.  */
      || lang_GNU_Fortran ()
      || seq == NULL)
    return;

  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (seq, warn_switch_unreachable_r, NULL, &wi);
  gimple *stmt = (gimple *) wi.info;

  if (stmt && gimple_code (stmt) != GIMPLE_LABEL)
    {
      if (gimple_code (stmt) == GIMPLE_GOTO
	  && TREE_CODE (gimple_goto_dest (stmt)) == LABEL_DECL
	  && DECL_ARTIFICIAL (gimple_goto_dest (stmt)))
	/* Don't warn for compiler-generated gotos.  These occur
	   in Duff's devices, for example.  */;
      else
	warning_at (gimple_location (stmt), OPT_Wswitch_unreachable,
		    "statement will never be executed");
    }
}
/* A label entry that pairs label and a location.  Used by the implicit
   fallthrough detection below to remember labels of interest together
   with the location to warn at.  */
struct label_entry
{
  /* The LABEL_DECL itself.  */
  tree label;
  /* Source location associated with the label (e.g. of the guarding if).  */
  location_t loc;
};
/* Look LABEL up in the vector of label entries VEC; return the matching
   entry, or NULL when LABEL is not present.  */
static struct label_entry *
find_label_entry (const auto_vec<struct label_entry> *vec, tree label)
{
  unsigned int ix;
  struct label_entry *entry;

  FOR_EACH_VEC_ELT (*vec, ix, entry)
    {
      if (entry->label == label)
	return entry;
    }

  return NULL;
}
/* Return true if LABEL, a LABEL_DECL, represents a case label
in a vector of labels CASES. */
static bool
case_label_p (const vec<tree> *cases, tree label)
{
unsigned int i;
tree l;
FOR_EACH_VEC_ELT (*cases, i, l)
if (CASE_LABEL (l) == label)
return true;
return false;
}
/* Find the last nondebug statement in a scope STMT.  Descends through
   GIMPLE_BIND and GIMPLE_TRY containers; for a try/finally whose eval
   part can fall through, the last statement of the cleanup is the one
   that executes last.  */
static gimple *
last_stmt_in_scope (gimple *stmt)
{
  if (!stmt)
    return NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_BIND:
      {
	gbind *bind = as_a <gbind *> (stmt);
	stmt = gimple_seq_last_nondebug_stmt (gimple_bind_body (bind));
	return last_stmt_in_scope (stmt);
      }

    case GIMPLE_TRY:
      {
	gtry *try_stmt = as_a <gtry *> (stmt);
	stmt = gimple_seq_last_nondebug_stmt (gimple_try_eval (try_stmt));
	gimple *last_eval = last_stmt_in_scope (stmt);
	/* When the eval part may fall through (and is not an explicit
	   IFN_FALLTHROUGH marker), the finally cleanup runs after it, so
	   recurse into the cleanup sequence instead.  */
	if (gimple_stmt_may_fallthru (last_eval)
	    && (last_eval == NULL
		|| !gimple_call_internal_p (last_eval, IFN_FALLTHROUGH))
	    && gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
	  {
	    stmt = gimple_seq_last_nondebug_stmt (gimple_try_cleanup (try_stmt));
	    return last_stmt_in_scope (stmt);
	  }
	else
	  return last_eval;
      }

    case GIMPLE_DEBUG:
      /* Debug stmts are filtered out by gimple_seq_last_nondebug_stmt.  */
      gcc_unreachable ();

    default:
      return stmt;
    }
}
/* Collect interesting labels in LABELS and return the statement preceding
   another case label, or a user-defined label.  Store a location useful
   to give warnings at *PREVLOC (usually the location of the returned
   statement or of its surrounding scope).  */
static gimple *
collect_fallthrough_labels (gimple_stmt_iterator *gsi_p,
			    auto_vec <struct label_entry> *labels,
			    location_t *prevloc)
{
  gimple *prev = NULL;

  *prevloc = UNKNOWN_LOCATION;
  do
    {
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_BIND)
	{
	  /* Recognize the special GIMPLE_BIND added by gimplify_switch_expr,
	     which starts on a GIMPLE_SWITCH and ends with a break label.
	     Handle that as a single statement that can fall through.  */
	  gbind *bind = as_a <gbind *> (gsi_stmt (*gsi_p));
	  gimple *first = gimple_seq_first_stmt (gimple_bind_body (bind));
	  gimple *last = gimple_seq_last_stmt (gimple_bind_body (bind));
	  if (last
	      && gimple_code (first) == GIMPLE_SWITCH
	      && gimple_code (last) == GIMPLE_LABEL)
	    {
	      tree label = gimple_label_label (as_a <glabel *> (last));
	      if (SWITCH_BREAK_LABEL_P (label))
		{
		  prev = bind;
		  gsi_next (gsi_p);
		  continue;
		}
	    }
	}
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_BIND
	  || gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_TRY)
	{
	  /* Nested scope.  Only look at the last statement of
	     the innermost scope.  */
	  location_t bind_loc = gimple_location (gsi_stmt (*gsi_p));
	  gimple *last = last_stmt_in_scope (gsi_stmt (*gsi_p));
	  if (last)
	    {
	      prev = last;
	      /* It might be a label without a location.  Use the
		 location of the scope then.  */
	      if (!gimple_has_location (prev))
		*prevloc = bind_loc;
	    }
	  gsi_next (gsi_p);
	  continue;
	}

      /* Ifs are tricky.  */
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_COND)
	{
	  gcond *cond_stmt = as_a <gcond *> (gsi_stmt (*gsi_p));
	  tree false_lab = gimple_cond_false_label (cond_stmt);
	  location_t if_loc = gimple_location (cond_stmt);

	  /* If we have e.g.
	       if (i > 1) goto <D.2259>; else goto D;
	     we can't do much with the else-branch.  */
	  if (!DECL_ARTIFICIAL (false_lab))
	    break;

	  /* Go on until the false label, then one step back.  */
	  for (; !gsi_end_p (*gsi_p); gsi_next (gsi_p))
	    {
	      gimple *stmt = gsi_stmt (*gsi_p);
	      if (gimple_code (stmt) == GIMPLE_LABEL
		  && gimple_label_label (as_a <glabel *> (stmt)) == false_lab)
		break;
	    }

	  /* Not found?  Oops.  */
	  if (gsi_end_p (*gsi_p))
	    break;

	  /* The false label is a potential fallthrough target; record it
	     with the location of the guarding if.  */
	  struct label_entry l = { false_lab, if_loc };
	  labels->safe_push (l);

	  /* Go to the last statement of the then branch.  */
	  gsi_prev (gsi_p);

	  /* if (i != 0) goto <D.1759>; else goto <D.1760>;
	     <D.1759>:
	     <stmt>;
	     goto <D.1761>;
	     <D.1760>:
	   */
	  if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_GOTO
	      && !gimple_has_location (gsi_stmt (*gsi_p)))
	    {
	      /* Look at the statement before, it might be
		 attribute fallthrough, in which case don't warn.  */
	      gsi_prev (gsi_p);
	      bool fallthru_before_dest
		= gimple_call_internal_p (gsi_stmt (*gsi_p), IFN_FALLTHROUGH);
	      gsi_next (gsi_p);
	      tree goto_dest = gimple_goto_dest (gsi_stmt (*gsi_p));
	      if (!fallthru_before_dest)
		{
		  struct label_entry l = { goto_dest, if_loc };
		  labels->safe_push (l);
		}
	    }
	  /* And move back.  */
	  gsi_next (gsi_p);
	}

      /* Remember the last statement.  Skip labels that are of no interest
	 to us.  */
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_LABEL)
	{
	  tree label = gimple_label_label (as_a <glabel *> (gsi_stmt (*gsi_p)));
	  if (find_label_entry (labels, label))
	    prev = gsi_stmt (*gsi_p);
	}
      else if (gimple_call_internal_p (gsi_stmt (*gsi_p), IFN_ASAN_MARK))
	;
      else if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_PREDICT)
	;
      else if (!is_gimple_debug (gsi_stmt (*gsi_p)))
	prev = gsi_stmt (*gsi_p);
      gsi_next (gsi_p);
    }
  while (!gsi_end_p (*gsi_p)
	 /* Stop if we find a case or a user-defined label.  */
	 && (gimple_code (gsi_stmt (*gsi_p)) != GIMPLE_LABEL
	     || !gimple_has_location (gsi_stmt (*gsi_p))));
  if (prev && gimple_has_location (prev))
    *prevloc = gimple_location (prev);
  return prev;
}
/* Return true if the switch fallthough warning should occur.  LABEL is
   the label statement that we're falling through to.  GSI_P points at the
   statement following the run of labels that begins the fallen-into case;
   the iterator itself is never advanced here (local copies are used).  */
static bool
should_warn_for_implicit_fallthrough (gimple_stmt_iterator *gsi_p, tree label)
{
  gimple_stmt_iterator gsi = *gsi_p;

  /* Don't warn if the label is marked with a "falls through" comment.  */
  if (FALLTHROUGH_LABEL_P (label))
    return false;

  /* Don't warn for non-case labels followed by a statement:
       case 0:
	 foo ();
       label:
	 bar ();
     as these are likely intentional.  */
  if (!case_label_p (&gimplify_ctxp->case_labels, label))
    {
      tree l;
      /* Walk past any further non-case labels.  If the sequence ends or
	 a non-label statement follows, treat the fallthrough into this
	 plain label as intentional and stay quiet.  */
      while (!gsi_end_p (gsi)
	     && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     && (l = gimple_label_label (as_a <glabel *> (gsi_stmt (gsi))))
	     && !case_label_p (&gimplify_ctxp->case_labels, l))
	gsi_next_nondebug (&gsi);
      if (gsi_end_p (gsi) || gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
	return false;
    }

  /* Don't warn for terminated branches, i.e. when the subsequent case labels
     immediately breaks.  */
  gsi = *gsi_p;

  /* Skip all immediately following labels.  */
  while (!gsi_end_p (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_PREDICT))
    gsi_next_nondebug (&gsi);

  /* { ... something; default:; } */
  if (gsi_end_p (gsi)
      /* { ... something; default: break; } or
	 { ... something; default: goto L; } */
      || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
      /* { ... something; default: return; } */
      || gimple_code (gsi_stmt (gsi)) == GIMPLE_RETURN)
    return false;

  return true;
}
/* Callback for walk_gimple_seq.  Scans a statement sequence for a
   label ... <may-fallthru stmt> ... label pattern and emits
   -Wimplicit-fallthrough diagnostics.  Returning integer_zero_node
   terminates the walk early (end of sequence reached).  */
static tree
warn_implicit_fallthrough_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
			     struct walk_stmt_info *)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_TRY:
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRANSACTION:
      /* Walk the sub-statements.  */
      *handled_ops_p = false;
      break;

    /* Find a sequence of form:

       GIMPLE_LABEL
       [...]
       <may fallthru stmt>
       GIMPLE_LABEL

       and possibly warn.  */
    case GIMPLE_LABEL:
      {
	/* Found a label.  Skip all immediately following labels.  */
	while (!gsi_end_p (*gsi_p)
	       && gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_LABEL)
	  gsi_next_nondebug (gsi_p);

	/* There might be no more statements.  */
	if (gsi_end_p (*gsi_p))
	  return integer_zero_node;

	/* Vector of labels that fall through.  */
	auto_vec <struct label_entry> labels;
	location_t prevloc;
	/* PREV is the last interesting statement before the next label;
	   PREVLOC its location (possibly taken from an enclosing scope).  */
	gimple *prev = collect_fallthrough_labels (gsi_p, &labels, &prevloc);

	/* There might be no more statements.  */
	if (gsi_end_p (*gsi_p))
	  return integer_zero_node;

	gimple *next = gsi_stmt (*gsi_p);
	tree label;
	/* If what follows is a label, then we may have a fallthrough.  */
	if (gimple_code (next) == GIMPLE_LABEL
	    && gimple_has_location (next)
	    && (label = gimple_label_label (as_a <glabel *> (next)))
	    && prev != NULL)
	  {
	    struct label_entry *l;
	    bool warned_p = false;
	    auto_diagnostic_group d;
	    if (!should_warn_for_implicit_fallthrough (gsi_p, label))
	      /* Quiet.  */;
	    else if (gimple_code (prev) == GIMPLE_LABEL
		     && (label = gimple_label_label (as_a <glabel *> (prev)))
		     && (l = find_label_entry (&labels, label)))
	      warned_p = warning_at (l->loc, OPT_Wimplicit_fallthrough_,
				     "this statement may fall through");
	    else if (!gimple_call_internal_p (prev, IFN_FALLTHROUGH)
		     /* Try to be clever and don't warn when the statement
			can't actually fall through.  */
		     && gimple_stmt_may_fallthru (prev)
		     && prevloc != UNKNOWN_LOCATION)
	      warned_p = warning_at (prevloc,
				     OPT_Wimplicit_fallthrough_,
				     "this statement may fall through");
	    if (warned_p)
	      inform (gimple_location (next), "here");

	    /* Mark this label as processed so as to prevent multiple
	       warnings in nested switches.  */
	    FALLTHROUGH_LABEL_P (label) = true;

	    /* So that next warn_implicit_fallthrough_r will start looking for
	       a new sequence starting with this label.  */
	    gsi_prev (gsi_p);
	  }
      }
      break;
   default:
      break;
    }
  return NULL_TREE;
}
/* Warn when a switch case falls through.  Walks SEQ with
   warn_implicit_fallthrough_r; a no-op unless -Wimplicit-fallthrough
   is enabled and we are compiling a C-family language.  */
static void
maybe_warn_implicit_fallthrough (gimple_seq seq)
{
  if (!warn_implicit_fallthrough)
    return;

  /* This warning is meant for C/C++/ObjC/ObjC++ only.  */
  bool c_family_p = lang_GNU_C () || lang_GNU_CXX () || lang_GNU_OBJC ();
  if (!c_family_p)
    return;

  struct walk_stmt_info info;
  memset (&info, 0, sizeof (info));
  walk_gimple_seq (seq, warn_implicit_fallthrough_r, NULL, &info);
}
/* Callback for walk_gimple_seq.  Removes IFN_FALLTHROUGH marker calls and
   verifies each one immediately precedes a case/default label, emitting a
   pedwarn otherwise.  Returning integer_zero_node (with the location stored
   through WI->info) signals a marker at the very end of the sequence.  */
static tree
expand_FALLTHROUGH_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
		      struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_TRY:
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRANSACTION:
      /* Walk the sub-statements.  */
      *handled_ops_p = false;
      break;
    case GIMPLE_CALL:
      if (gimple_call_internal_p (stmt, IFN_FALLTHROUGH))
	{
	  /* Drop the marker; STMT stays valid for location queries.  */
	  gsi_remove (gsi_p, true);
	  if (gsi_end_p (*gsi_p))
	    {
	      /* Marker was the last statement: report via WI->info.  */
	      *static_cast<location_t *>(wi->info) = gimple_location (stmt);
	      return integer_zero_node;
	    }

	  bool found = false;
	  location_t loc = gimple_location (stmt);

	  gimple_stmt_iterator gsi2 = *gsi_p;
	  stmt = gsi_stmt (gsi2);
	  if (gimple_code (stmt) == GIMPLE_GOTO && !gimple_has_location (stmt))
	    {
	      /* Go on until the artificial label.  */
	      tree goto_dest = gimple_goto_dest (stmt);
	      for (; !gsi_end_p (gsi2); gsi_next (&gsi2))
		{
		  if (gimple_code (gsi_stmt (gsi2)) == GIMPLE_LABEL
		      && gimple_label_label (as_a <glabel *> (gsi_stmt (gsi2)))
			 == goto_dest)
		    break;
		}

	      /* Not found?  Stop.  */
	      if (gsi_end_p (gsi2))
		break;

	      /* Look one past it.  */
	      gsi_next (&gsi2);
	    }

	  /* We're looking for a case label or default label here.  */
	  while (!gsi_end_p (gsi2))
	    {
	      stmt = gsi_stmt (gsi2);
	      if (gimple_code (stmt) == GIMPLE_LABEL)
		{
		  tree label = gimple_label_label (as_a <glabel *> (stmt));
		  if (gimple_has_location (stmt) && DECL_ARTIFICIAL (label))
		    {
		      found = true;
		      break;
		    }
		}
	      else if (gimple_call_internal_p (stmt, IFN_ASAN_MARK))
		;
	      else if (!is_gimple_debug (stmt))
		/* Anything else is not expected.  */
		break;
	      gsi_next (&gsi2);
	    }
	  if (!found)
	    pedwarn (loc, 0, "attribute %<fallthrough%> not preceding "
		     "a case label or default label");
	}
      break;
    default:
      break;
    }
  return NULL_TREE;
}
/* Expand all FALLTHROUGH () calls in SEQ.  Each marker is removed by the
   walker; a marker at the very end of the switch is diagnosed here.  */
static void
expand_FALLTHROUGH (gimple_seq *seq_p)
{
  location_t trailing_loc;
  struct walk_stmt_info info;

  memset (&info, 0, sizeof (info));
  info.info = (void *) &trailing_loc;

  walk_gimple_seq_mod (seq_p, expand_FALLTHROUGH_r, NULL, &info);

  if (info.callback_result != integer_zero_node)
    return;

  /* We've found [[fallthrough]]; at the end of a switch, which the C++
     standard says is ill-formed; see [dcl.attr.fallthrough].  */
  pedwarn (trailing_loc, 0, "attribute %<fallthrough%> not preceding "
	   "a case label or default label");
}
/* Gimplify a SWITCH_EXPR, and collect the vector of labels it can
   branch to.  Emits the resulting GIMPLE_SWITCH (and its body) into
   PRE_P; always returns GS_ALL_DONE unless gimplifying the condition
   failed.  */
static enum gimplify_status
gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree switch_expr = *expr_p;
  gimple_seq switch_body_seq = NULL;
  enum gimplify_status ret;
  tree index_type = TREE_TYPE (switch_expr);
  if (index_type == NULL_TREE)
    index_type = TREE_TYPE (SWITCH_COND (switch_expr));

  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL, is_gimple_val,
		       fb_rvalue);
  if (ret == GS_ERROR || ret == GS_UNHANDLED)
    return ret;

  if (SWITCH_BODY (switch_expr))
    {
      vec<tree> labels;
      vec<tree> saved_labels;
      hash_set<tree> *saved_live_switch_vars = NULL;
      tree default_case = NULL_TREE;
      gswitch *switch_stmt;

      /* Save old labels, get new ones from body, then restore the old
	 labels.  Save all the things from the switch body to append after.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels.create (8);

      /* Do not create live_switch_vars if SWITCH_BODY is not a BIND_EXPR.  */
      saved_live_switch_vars = gimplify_ctxp->live_switch_vars;
      tree_code body_type = TREE_CODE (SWITCH_BODY (switch_expr));
      if (body_type == BIND_EXPR || body_type == STATEMENT_LIST)
	gimplify_ctxp->live_switch_vars = new hash_set<tree> (4);
      else
	gimplify_ctxp->live_switch_vars = NULL;

      bool old_in_switch_expr = gimplify_ctxp->in_switch_expr;
      gimplify_ctxp->in_switch_expr = true;

      gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq);

      gimplify_ctxp->in_switch_expr = old_in_switch_expr;
      maybe_warn_switch_unreachable (switch_body_seq);
      maybe_warn_implicit_fallthrough (switch_body_seq);
      /* Only do this for the outermost GIMPLE_SWITCH.  */
      if (!gimplify_ctxp->in_switch_expr)
	expand_FALLTHROUGH (&switch_body_seq);

      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;

      if (gimplify_ctxp->live_switch_vars)
	{
	  gcc_assert (gimplify_ctxp->live_switch_vars->is_empty ());
	  delete gimplify_ctxp->live_switch_vars;
	}
      gimplify_ctxp->live_switch_vars = saved_live_switch_vars;

      /* Sort/verify the case labels and find (or leave NULL) the default.  */
      preprocess_case_label_vec_for_gimple (labels, index_type,
					    &default_case);

      bool add_bind = false;
      if (!default_case)
	{
	  glabel *new_default;

	  /* No user-provided default: synthesize an artificial one that
	     just falls out of the switch.  */
	  default_case
	    = build_case_label (NULL_TREE, NULL_TREE,
				create_artificial_label (UNKNOWN_LOCATION));
	  if (old_in_switch_expr)
	    {
	      SWITCH_BREAK_LABEL_P (CASE_LABEL (default_case)) = 1;
	      add_bind = true;
	    }
	  new_default = gimple_build_label (CASE_LABEL (default_case));
	  gimplify_seq_add_stmt (&switch_body_seq, new_default);
	}
      else if (old_in_switch_expr)
	{
	  gimple *last = gimple_seq_last_stmt (switch_body_seq);
	  if (last && gimple_code (last) == GIMPLE_LABEL)
	    {
	      tree label = gimple_label_label (as_a <glabel *> (last));
	      if (SWITCH_BREAK_LABEL_P (label))
		add_bind = true;
	    }
	}

      switch_stmt = gimple_build_switch (SWITCH_COND (switch_expr),
					 default_case, labels);
      /* For the benefit of -Wimplicit-fallthrough, if switch_body_seq
	 ends with a GIMPLE_LABEL holding SWITCH_BREAK_LABEL_P LABEL_DECL,
	 wrap the GIMPLE_SWITCH up to that GIMPLE_LABEL into a GIMPLE_BIND,
	 so that we can easily find the start and end of the switch
	 statement.  */
      if (add_bind)
	{
	  gimple_seq bind_body = NULL;
	  gimplify_seq_add_stmt (&bind_body, switch_stmt);
	  gimple_seq_add_seq (&bind_body, switch_body_seq);
	  gbind *bind = gimple_build_bind (NULL_TREE, bind_body, NULL_TREE);
	  gimple_set_location (bind, EXPR_LOCATION (switch_expr));
	  gimplify_seq_add_stmt (pre_p, bind);
	}
      else
	{
	  gimplify_seq_add_stmt (pre_p, switch_stmt);
	  gimplify_seq_add_seq (pre_p, switch_body_seq);
	}
      labels.release ();
    }
  else
    gcc_unreachable ();

  return GS_ALL_DONE;
}
/* Gimplify the LABEL_EXPR pointed to by EXPR_P.  Emits the GIMPLE_LABEL
   into PRE_P, followed by a branch-prediction hint when the label carries
   a "hot" or "cold" attribute.  */
static enum gimplify_status
gimplify_label_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree decl = LABEL_EXPR_LABEL (*expr_p);

  /* The label must belong to the function being gimplified.  */
  gcc_assert (decl_function_context (decl) == current_function_decl);

  glabel *stmt = gimple_build_label (decl);
  gimple_set_location (stmt, EXPR_LOCATION (*expr_p));
  gimplify_seq_add_stmt (pre_p, stmt);

  if (lookup_attribute ("cold", DECL_ATTRIBUTES (decl)))
    gimple_seq_add_stmt (pre_p, gimple_build_predict (PRED_COLD_LABEL,
						      NOT_TAKEN));
  else if (lookup_attribute ("hot", DECL_ATTRIBUTES (decl)))
    gimple_seq_add_stmt (pre_p, gimple_build_predict (PRED_HOT_LABEL,
						      TAKEN));

  return GS_ALL_DONE;
}
/* Gimplify the CASE_LABEL_EXPR pointed to by EXPR_P.  Records the case in
   the innermost context that collects case labels, then emits the label
   (plus any hot/cold prediction hint) into PRE_P.  */
static enum gimplify_status
gimplify_case_label_expr (tree *expr_p, gimple_seq *pre_p)
{
  /* Invalid programs can play Duff's Device type games with, for example,
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification, in the
     diagnose_omp_blocks pass.  */
  struct gimplify_ctx *ctx = gimplify_ctxp;
  while (!ctx->case_labels.exists ())
    ctx = ctx->prev_context;

  tree label = CASE_LABEL (*expr_p);
  glabel *stmt = gimple_build_label (label);
  gimple_set_location (stmt, EXPR_LOCATION (*expr_p));
  ctx->case_labels.safe_push (*expr_p);
  gimplify_seq_add_stmt (pre_p, stmt);

  if (lookup_attribute ("cold", DECL_ATTRIBUTES (label)))
    gimple_seq_add_stmt (pre_p, gimple_build_predict (PRED_COLD_LABEL,
						      NOT_TAKEN));
  else if (lookup_attribute ("hot", DECL_ATTRIBUTES (label)))
    gimple_seq_add_stmt (pre_p, gimple_build_predict (PRED_HOT_LABEL,
						      TAKEN));

  return GS_ALL_DONE;
}
/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
   if necessary.  Returns NULL_TREE when LABEL_P itself is null (nowhere
   to jump: fall through).  */
tree
build_and_jump (tree *label_p)
{
  /* If there's nowhere to jump, just fall through.  */
  if (!label_p)
    return NULL_TREE;

  /* Create the label lazily on first use.  */
  if (!*label_p)
    *label_p = create_artificial_label (UNKNOWN_LOCATION);

  return build1 (GOTO_EXPR, void_type_node, *label_p);
}
/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
   This also involves building a label to jump to and communicating it to
   gimplify_loop_expr through gimplify_ctxp->exit_label.  */
static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  tree pred = TREE_OPERAND (*expr_p, 0);

  /* Jump to the loop's (lazily-created) exit label when PRED holds.  */
  tree jump = build_and_jump (&gimplify_ctxp->exit_label);
  *expr_p = build3 (COND_EXPR, void_type_node, pred, jump, NULL_TREE);
  return GS_OK;
}
/* *EXPR_P is a COMPONENT_REF being used as an rvalue.  If its type is
   different from its canonical type, wrap the whole thing inside a
   NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
   type.

   The canonical type of a COMPONENT_REF is the type of the field being
   referenced--unless the field is a bit-field which can be read directly
   in a smaller mode, in which case the canonical type is the
   sign-appropriate type corresponding to that mode.  */
static void
canonicalize_component_ref (tree *expr_p)
{
  tree expr = *expr_p;
  tree type;

  gcc_assert (TREE_CODE (expr) == COMPONENT_REF);

  /* For integral fields, get_unwidened picks the narrower mode a
     bit-field can legally be read in; otherwise use the field type.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
  else
    type = TREE_TYPE (TREE_OPERAND (expr, 1));

  /* One could argue that all the stuff below is not necessary for
     the non-bitfield case and declare it a FE error if type
     adjustment would be needed.  */
  if (TREE_TYPE (expr) != type)
    {
#ifdef ENABLE_TYPES_CHECKING
      tree old_type = TREE_TYPE (expr);
#endif
      int type_quals;

      /* We need to preserve qualifiers and propagate them from
	 operand 0.  */
      type_quals = TYPE_QUALS (type)
	| TYPE_QUALS (TREE_TYPE (TREE_OPERAND (expr, 0)));
      if (TYPE_QUALS (type) != type_quals)
	type = build_qualified_type (TYPE_MAIN_VARIANT (type), type_quals);

      /* Set the type of the COMPONENT_REF to the underlying type.  */
      TREE_TYPE (expr) = type;

#ifdef ENABLE_TYPES_CHECKING
      /* It is now a FE error, if the conversion from the canonical
	 type to the original expression type is not useless.  */
      gcc_assert (useless_type_conversion_p (old_type, type));
#endif
    }
}
/* If a NOP conversion is changing a pointer to array of foo to a pointer
   to foo, embed that change in the ADDR_EXPR by converting
      T array[U];
      (T *)&array
   ==>
      &array[L]
   where L is the lower bound.  For simplicity, only do this for constant
   lower bound.

   The constraint is that the type of &array[L] is trivially convertible
   to T *.  Leaves *EXPR_P untouched unless every check succeeds.  */
static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree datype, ddatype, pddatype;

  /* We simplify only conversions from an ADDR_EXPR to a pointer type.  */
  if (!POINTER_TYPE_P (TREE_TYPE (expr))
      || TREE_CODE (addr_expr) != ADDR_EXPR)
    return;

  /* The addr_expr type should be a pointer to an array.  */
  datype = TREE_TYPE (TREE_TYPE (addr_expr));
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;

  /* The pointer to element type shall be trivially convertible to
     the expression pointer type.  */
  ddatype = TREE_TYPE (datype);
  pddatype = build_pointer_type (ddatype);
  if (!useless_type_conversion_p (TYPE_MAIN_VARIANT (TREE_TYPE (expr)),
				  pddatype))
    return;

  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (ddatype)
      || TREE_CODE (TYPE_SIZE_UNIT (ddatype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;

  /* All checks succeeded.  Build a new node to merge the cast.  */
  *expr_p = build4 (ARRAY_REF, ddatype, TREE_OPERAND (addr_expr, 0),
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    NULL_TREE, NULL_TREE);
  *expr_p = build1 (ADDR_EXPR, pddatype, *expr_p);

  /* We can have stripped a required restrict qualifier above.  */
  if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
    *expr_p = fold_convert (TREE_TYPE (expr), *expr_p);
}
/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR.  Remove it and/or other conversions
   underneath as appropriate.  The steps below are order-dependent: strip
   inner nops first, then the outer one if useless, then canonicalize what
   remains.  Always returns GS_OK.  */
static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  gcc_assert (CONVERT_EXPR_P (*expr_p));

  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));

  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);

  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (CONVERT_EXPR_P (*expr_p))
    {
      tree sub = TREE_OPERAND (*expr_p, 0);

      /* If a NOP conversion is changing the type of a COMPONENT_REF
	 expression, then canonicalize its type now in order to expose more
	 redundant conversions.  */
      if (TREE_CODE (sub) == COMPONENT_REF)
	canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));

      /* If a NOP conversion is changing a pointer to array of foo
	 to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
	canonicalize_addr_expr (expr_p);
    }

  /* If we have a conversion to a non-register type force the
     use of a VIEW_CONVERT_EXPR instead.  */
  if (CONVERT_EXPR_P (*expr_p) && !is_gimple_reg_type (TREE_TYPE (*expr_p)))
    *expr_p = fold_build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (*expr_p),
			       TREE_OPERAND (*expr_p, 0));

  /* Canonicalize CONVERT_EXPR to NOP_EXPR.  */
  if (TREE_CODE (*expr_p) == CONVERT_EXPR)
    TREE_SET_CODE (*expr_p, NOP_EXPR);

  return GS_OK;
}
/* Gimplify a VAR_DECL or PARM_DECL.  Return GS_OK if we expanded a
   DECL_VALUE_EXPR, and it's worth re-examining things.  */
static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;

  /* ??? If this is a local variable, and it has not been seen in any
     outer BIND_EXPR, then it's probably the result of a duplicate
     declaration, for which we've already issued an error.  It would
     be really nice if the front end wouldn't leak these at all.
     Currently the only known culprit is C++ destructors, as seen
     in g++.old-deja/g++.jason/binding.C.  */
  bool unseen_local_p = (VAR_P (decl)
			 && !DECL_SEEN_IN_BIND_EXPR_P (decl)
			 && !TREE_STATIC (decl)
			 && !DECL_EXTERNAL (decl)
			 && decl_function_context (decl)
			    == current_function_decl);
  if (unseen_local_p)
    {
      gcc_assert (seen_error ());
      return GS_ERROR;
    }

  /* When within an OMP context, notice uses of variables.  */
  if (gimplify_omp_ctxp
      && omp_notice_variable (gimplify_omp_ctxp, decl, true))
    return GS_ALL_DONE;

  /* If the decl is an alias for another expression, substitute it now.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      *expr_p = unshare_expr (DECL_VALUE_EXPR (decl));
      return GS_OK;
    }

  return GS_ALL_DONE;
}
/* Recalculate the value of the TREE_SIDE_EFFECTS flag for T.  The flag is
   reset from T's own volatility and then re-derived from its operands;
   note the deliberate fall-through from tcc_expression into the operand
   scan for non-self-modifying expression codes.  */
static void
recalculate_side_effects (tree t)
{
  enum tree_code code = TREE_CODE (t);
  int len = TREE_OPERAND_LENGTH (t);
  int i;

  switch (TREE_CODE_CLASS (code))
    {
    case tcc_expression:
      switch (code)
	{
	case INIT_EXPR:
	case MODIFY_EXPR:
	case VA_ARG_EXPR:
	case PREDECREMENT_EXPR:
	case PREINCREMENT_EXPR:
	case POSTDECREMENT_EXPR:
	case POSTINCREMENT_EXPR:
	  /* All of these have side-effects, no matter what their
	     operands are.  */
	  return;

	default:
	  break;
	}
      /* Fall through.  */

    case tcc_comparison:  /* a comparison expression */
    case tcc_unary:       /* a unary arithmetic expression */
    case tcc_binary:      /* a binary arithmetic expression */
    case tcc_reference:   /* a reference */
    case tcc_vl_exp:      /* a function call */
      /* Side effects come from volatility or from any operand that has
	 side effects itself.  */
      TREE_SIDE_EFFECTS (t) = TREE_THIS_VOLATILE (t);
      for (i = 0; i < len; ++i)
	{
	  tree op = TREE_OPERAND (t, i);
	  if (op && TREE_SIDE_EFFECTS (op))
	    TREE_SIDE_EFFECTS (t) = 1;
	}
      break;

    case tcc_constant:
      /* No side-effects.  */
      return;

    default:
      gcc_unreachable ();
    }
}
/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node *EXPR_P.

      compound_lval
	      : min_lval '[' val ']'
	      | min_lval '.' ID
	      | compound_lval '[' val ']'
	      | compound_lval '.' ID

   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.  Also, this way we don't run into problems with union
   aliasing; gcc requires that for accesses through a union to alias, the
   union reference must be explicit, which was not always the case when we
   were splitting up array and member refs.

   PRE_P points to the sequence where side effects that must happen before
     *EXPR_P should be stored.

   POST_P points to the sequence where side effects that must happen after
     *EXPR_P should be stored.  */
static enum gimplify_status
gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			fallback_t fallback)
{
  tree *p;
  enum gimplify_status ret = GS_ALL_DONE, tret;
  int i;
  location_t loc = EXPR_LOCATION (*expr_p);
  tree expr = *expr_p;

  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  auto_vec<tree, 10> expr_stack;

  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
	*p = fold_indirect_ref_loc (loc, *p);

      if (handled_component_p (*p))
	;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
	 additional COMPONENT_REFs.  */
      else if ((VAR_P (*p) || TREE_CODE (*p) == PARM_DECL)
	       && gimplify_var_or_parm_decl (p) == GS_OK)
	goto restart;
      else
	break;

      expr_stack.safe_push (*p);
    }

  gcc_assert (expr_stack.length ());

  /* Now EXPR_STACK is a stack of pointers to all the refs we've
     walked through and P points to the innermost expression.

     Java requires that we elaborated nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.

     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */
  for (i = expr_stack.length () - 1; i >= 0; i--)
    {
      tree t = expr_stack[i];

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the low bound and element type size and put them into
	     the ARRAY_REF.  If these values are set, they have already been
	     gimplified.  */
	  if (TREE_OPERAND (t, 2) == NULL_TREE)
	    {
	      tree low = unshare_expr (array_ref_low_bound (t));
	      if (!is_gimple_min_invariant (low))
		{
		  TREE_OPERAND (t, 2) = low;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  else
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				    is_gimple_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }

	  if (TREE_OPERAND (t, 3) == NULL_TREE)
	    {
	      tree elmt_size = array_ref_element_size (t);
	      if (!is_gimple_min_invariant (elmt_size))
		{
		  elmt_size = unshare_expr (elmt_size);
		  tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
		  tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));

		  /* Divide the element size by the alignment of the element
		     type (above).  */
		  elmt_size = size_binop_loc (loc, EXACT_DIV_EXPR,
					      elmt_size, factor);

		  TREE_OPERAND (t, 3) = elmt_size;
		  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  else
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
				    is_gimple_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T and gimplify it.  */
	  if (TREE_OPERAND (t, 2) == NULL_TREE)
	    {
	      tree offset = component_ref_field_offset (t);
	      if (!is_gimple_min_invariant (offset))
		{
		  offset = unshare_expr (offset);
		  tree field = TREE_OPERAND (t, 1);
		  tree factor
		    = size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);

		  /* Divide the offset by its alignment.  */
		  offset = size_binop_loc (loc, EXACT_DIV_EXPR,
					   offset, factor);

		  TREE_OPERAND (t, 2) = offset;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  else
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				    is_gimple_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
    }

  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
			fallback | fb_lvalue);
  ret = MIN (ret, tret);

  /* And finally, the indices and operands of ARRAY_REF.  During this
     loop we also remove any useless conversions.  */
  for (; expr_stack.length () > 0; )
    {
      tree t = expr_stack.pop ();

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the dimension.  */
	  if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				    is_gimple_val, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}

      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));

      /* The innermost expression P may have originally had
	 TREE_SIDE_EFFECTS set which would have caused all the outer
	 expressions in *EXPR_P leading to P to also have had
	 TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }

  /* If the outermost expression is a COMPONENT_REF, canonicalize its type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
    }

  expr_stack.release ();

  gcc_assert (*expr_p == expr || ret != GS_ALL_DONE);

  return ret;
}
/* Gimplify the self modifying expression pointed to by EXPR_P
   (++, --, +=, -=).

   PRE_P points to the list where side effects that must happen before
       *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
       *EXPR_P should be stored.

   WANT_VALUE is nonzero iff we want to use the value of this expression
       in another expression.

   ARITH_TYPE is the type the computation should be performed in.  */
enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			bool want_value, tree arith_type)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1;
  gimple_seq post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;
  location_t loc = EXPR_LOCATION (*expr_p);

  code = TREE_CODE (*expr_p);

  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
	      || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);

  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;

  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  */
  if (postfix)
    post_p = &post;

  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;

  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);

  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  */
  if (postfix)
    {
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;

      /* Snapshot the pre-modification value; this is the expression's
	 result for postfix forms.  */
      lhs = get_initialized_tmp_var (lhs, pre_p);
    }

  /* For POINTERs increment, use POINTER_PLUS_EXPR.  */
  if (POINTER_TYPE_P (TREE_TYPE (lhs)))
    {
      rhs = convert_to_ptrofftype_loc (loc, rhs);
      if (arith_code == MINUS_EXPR)
	rhs = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (rhs), rhs);
      t1 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (*expr_p), lhs, rhs);
    }
  else
    t1 = fold_convert (TREE_TYPE (*expr_p),
		       fold_build2 (arith_code, arith_type,
				    fold_convert (arith_type, lhs),
				    fold_convert (arith_type, rhs)));

  if (postfix)
    {
      gimplify_assign (lvalue, t1, pre_p);
      gimplify_seq_add_seq (orig_post_p, post);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      *expr_p = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);
      return GS_OK;
    }
}
/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR.  */
static void
maybe_with_size_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);

  /* If we've already wrapped this or the type is error_mark_node, we can't do
     anything.  */
  if (TREE_CODE (expr) == WITH_SIZE_EXPR || type == error_mark_node)
    return;

  /* If the size isn't known or is a constant, we have nothing to do.  */
  tree size = TYPE_SIZE_UNIT (type);
  if (size == NULL_TREE || poly_int_tree_p (size))
    return;

  /* Otherwise, make a WITH_SIZE_EXPR.  */
  size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (unshare_expr (size), expr);
  *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size);
}
/* Helper for gimplify_call_expr.  Gimplify a single argument *ARG_P
   Store any side-effects in PRE_P.  CALL_LOCATION is the location of
   the CALL_EXPR.  If ALLOW_SSA is set the actual parameter may be
   gimplified to an SSA name.  */
enum gimplify_status
gimplify_arg (tree *arg_p, gimple_seq *pre_p, location_t call_location,
	      bool allow_ssa)
{
  bool (*predicate) (tree);
  fallback_t fallback;

  /* In general, we allow lvalues for function arguments to avoid
     extra overhead of copying large aggregates out of even larger
     aggregates into temporaries only to copy the temporaries to
     the argument list.  Make optimizers happy by pulling out to
     temporaries those types that fit in registers.  */
  if (is_gimple_reg_type (TREE_TYPE (*arg_p)))
    {
      predicate = is_gimple_val;
      fallback = fb_rvalue;
    }
  else
    {
      predicate = is_gimple_lvalue;
      fallback = fb_either;

      /* Also strip a TARGET_EXPR that would force an extra copy.  */
      if (TREE_CODE (*arg_p) == TARGET_EXPR)
	{
	  tree init = TARGET_EXPR_INITIAL (*arg_p);
	  if (init != NULL_TREE && !VOID_TYPE_P (TREE_TYPE (init)))
	    *arg_p = init;
	}
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (arg_p);

  /* FIXME diagnostics: This will mess up gcc.dg/Warray-bounds.c.  */
  /* Make sure arguments have the same location as the function call
     itself.  */
  protected_set_expr_location (*arg_p, call_location);

  /* There is a sequence point before a function call.  Side effects in
     the argument list must occur before the actual call.  So, when
     gimplifying arguments, force gimplify_expr to use an internal
     post queue which is then appended to the end of PRE_P.  */
  return gimplify_expr (arg_p, pre_p, NULL, predicate, fallback, allow_ssa);
}
/* Don't fold inside offloading or taskreg regions: it can break code by
   adding decl references that weren't in the source.  We'll do it during
   omplower pass instead.  */
static bool
maybe_fold_stmt (gimple_stmt_iterator *gsi)
{
  /* Refuse to fold while inside any offloading/parallel/task region or a
     host teams region.  */
  for (struct gimplify_omp_ctx *c = gimplify_omp_ctxp; c;
       c = c->outer_context)
    {
      if ((c->region_type & (ORT_TARGET | ORT_PARALLEL | ORT_TASK)) != 0)
	return false;
      if ((c->region_type & ORT_HOST_TEAMS) == ORT_HOST_TEAMS)
	return false;
    }

  /* Delay folding of builtins until the IL is in consistent state
     so the diagnostic machinery can do a better job.  */
  if (gimple_call_builtin_p (gsi_stmt (*gsi)))
    return false;

  return fold_stmt (gsi);
}
/* Gimplify the CALL_EXPR node *EXPR_P into the GIMPLE sequence PRE_P.
   WANT_VALUE is true if the result of the call is desired.  Returns
   GS_ALL_DONE when the call has been fully lowered, GS_OK when *EXPR_P
   was replaced and should be re-gimplified, or GS_ERROR on failure.  */

static enum gimplify_status
gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree fndecl, parms, p, fnptrtype;
  enum gimplify_status ret;
  int i, nargs;
  gcall *call;
  bool builtin_va_start_p = false;
  location_t loc = EXPR_LOCATION (*expr_p);

  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);

  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);

  /* Gimplify internal functions created in the FEs.  Internal-fn calls
     have no CALL_EXPR_FN; a wanted value is handled by the caller.  */
  if (CALL_EXPR_FN (*expr_p) == NULL_TREE)
    {
      if (want_value)
	return GS_ALL_DONE;

      nargs = call_expr_nargs (*expr_p);
      enum internal_fn ifn = CALL_EXPR_IFN (*expr_p);
      auto_vec<tree> vargs (nargs);

      for (i = 0; i < nargs; i++)
	{
	  gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
			EXPR_LOCATION (*expr_p));
	  vargs.quick_push (CALL_EXPR_ARG (*expr_p, i));
	}

      /* NOTE(review): this declaration shadows the function-scope CALL;
	 harmless here since the outer variable is used only later.  */
      gcall *call = gimple_build_call_internal_vec (ifn, vargs);
      gimple_call_set_nothrow (call, TREE_NOTHROW (*expr_p));
      gimplify_seq_add_stmt (pre_p, call);
      return GS_ALL_DONE;
    }

  /* This may be a call to a builtin function.

     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.

     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.  */
  fndecl = get_callee_fndecl (*expr_p);
  if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      CASE_BUILT_IN_ALLOCA:
	/* If the call has been built for a variable-sized object, then we
	   want to restore the stack level when the enclosing BIND_EXPR is
	   exited to reclaim the allocated space; otherwise, we precisely
	   need to do the opposite and preserve the latest stack level.  */
	if (CALL_ALLOCA_FOR_VAR_P (*expr_p))
	  gimplify_ctxp->save_stack = true;
	else
	  gimplify_ctxp->keep_stack = true;
	break;

      case BUILT_IN_VA_START:
	{
	  builtin_va_start_p = TRUE;
	  if (call_expr_nargs (*expr_p) < 2)
	    {
	      error ("too few arguments to function %<va_start%>");
	      *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
	      return GS_OK;
	    }

	  if (fold_builtin_next_arg (*expr_p, true))
	    {
	      *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
	      return GS_OK;
	    }
	  break;
	}

      case BUILT_IN_EH_RETURN:
	cfun->calls_eh_return = true;
	break;

      default:
	;
      }
  if (fndecl && fndecl_built_in_p (fndecl))
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);
      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}
    }

  /* Remember the original function pointer type.  */
  fnptrtype = TREE_TYPE (CALL_EXPR_FN (*expr_p));

  /* Resolve OpenMP "declare variant" to the selected variant, but only
     before the function has been gimplified at all.  */
  if (flag_openmp
      && fndecl
      && cfun
      && (cfun->curr_properties & PROP_gimple_any) == 0)
    {
      tree variant = omp_resolve_declare_variant (fndecl);
      if (variant != fndecl)
	CALL_EXPR_FN (*expr_p) = build1 (ADDR_EXPR, fnptrtype, variant);
    }

  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&CALL_EXPR_FN (*expr_p), pre_p, NULL,
		       is_gimple_call_addr, fb_rvalue);

  nargs = call_expr_nargs (*expr_p);

  /* Get argument types for verification.  */
  fndecl = get_callee_fndecl (*expr_p);
  parms = NULL_TREE;
  if (fndecl)
    parms = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
  else
    parms = TYPE_ARG_TYPES (TREE_TYPE (fnptrtype));

  if (fndecl && DECL_ARGUMENTS (fndecl))
    p = DECL_ARGUMENTS (fndecl);
  else if (parms)
    p = parms;
  else
    p = NULL_TREE;
  /* Advance P past the named parameters; afterwards P is non-null iff
     there are more declared parameters than actual arguments.  */
  for (i = 0; i < nargs && p; i++, p = TREE_CHAIN (p))
    ;

  /* If the last argument is __builtin_va_arg_pack () and it is not
     passed as a named argument, decrease the number of CALL_EXPR
     arguments and set instead the CALL_EXPR_VA_ARG_PACK flag.  */
  if (!p
      && i < nargs
      && TREE_CODE (CALL_EXPR_ARG (*expr_p, nargs - 1)) == CALL_EXPR)
    {
      tree last_arg = CALL_EXPR_ARG (*expr_p, nargs - 1);
      tree last_arg_fndecl = get_callee_fndecl (last_arg);

      if (last_arg_fndecl
	  && fndecl_built_in_p (last_arg_fndecl, BUILT_IN_VA_ARG_PACK))
	{
	  tree call = *expr_p;

	  --nargs;
	  *expr_p = build_call_array_loc (loc, TREE_TYPE (call),
					  CALL_EXPR_FN (call),
					  nargs, CALL_EXPR_ARGP (call));

	  /* Copy all CALL_EXPR flags, location and block, except
	     CALL_EXPR_VA_ARG_PACK flag.  */
	  CALL_EXPR_STATIC_CHAIN (*expr_p) = CALL_EXPR_STATIC_CHAIN (call);
	  CALL_EXPR_TAILCALL (*expr_p) = CALL_EXPR_TAILCALL (call);
	  CALL_EXPR_RETURN_SLOT_OPT (*expr_p)
	    = CALL_EXPR_RETURN_SLOT_OPT (call);
	  CALL_FROM_THUNK_P (*expr_p) = CALL_FROM_THUNK_P (call);
	  SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (call));

	  /* Set CALL_EXPR_VA_ARG_PACK.  */
	  CALL_EXPR_VA_ARG_PACK (*expr_p) = 1;
	}
    }

  /* If the call returns twice then after building the CFG the call
     argument computations will no longer dominate the call because
     we add an abnormal incoming edge to the call.  So do not use SSA
     vars there.  */
  bool returns_twice = call_expr_flags (*expr_p) & ECF_RETURNS_TWICE;

  /* Gimplify the function arguments.  */
  if (nargs > 0)
    {
      for (i = (PUSH_ARGS_REVERSED ? nargs - 1 : 0);
	   PUSH_ARGS_REVERSED ? i >= 0 : i < nargs;
	   PUSH_ARGS_REVERSED ? i-- : i++)
	{
	  enum gimplify_status t;

	  /* Avoid gimplifying the second argument to va_start, which needs to
	     be the plain PARM_DECL.  */
	  if ((i != 1) || !builtin_va_start_p)
	    {
	      t = gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
				EXPR_LOCATION (*expr_p), ! returns_twice);

	      if (t == GS_ERROR)
		ret = GS_ERROR;
	    }
	}
    }

  /* Gimplify the static chain.  */
  if (CALL_EXPR_STATIC_CHAIN (*expr_p))
    {
      if (fndecl && !DECL_STATIC_CHAIN (fndecl))
	CALL_EXPR_STATIC_CHAIN (*expr_p) = NULL;
      else
	{
	  enum gimplify_status t;
	  t = gimplify_arg (&CALL_EXPR_STATIC_CHAIN (*expr_p), pre_p,
			    EXPR_LOCATION (*expr_p), ! returns_twice);
	  if (t == GS_ERROR)
	    ret = GS_ERROR;
	}
    }

  /* Verify the function result.  */
  if (want_value && fndecl
      && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fnptrtype))))
    {
      error_at (loc, "using result of function returning %<void%>");
      ret = GS_ERROR;
    }

  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);

      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}
    }
  else
    {
      *expr_p = error_mark_node;
      return GS_ERROR;
    }

  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
     decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR)
    {
      int flags = call_expr_flags (*expr_p);
      if (flags & (ECF_CONST | ECF_PURE)
	  /* An infinite loop is considered a side effect.  */
	  && !(flags & (ECF_LOOPING_CONST_OR_PURE)))
	TREE_SIDE_EFFECTS (*expr_p) = 0;
    }

  /* If the value is not needed by the caller, emit a new GIMPLE_CALL
     and clear *EXPR_P.  Otherwise, leave *EXPR_P in its gimplified
     form and delegate the creation of a GIMPLE_CALL to
     gimplify_modify_expr.  This is always possible because when
     WANT_VALUE is true, the caller wants the result of this call into
     a temporary, which means that we will emit an INIT_EXPR in
     internal_get_tmp_var which will then be handled by
     gimplify_modify_expr.  */
  if (!want_value)
    {
      /* The CALL_EXPR in *EXPR_P is already in GIMPLE form, so all we
	 have to do is replicate it as a GIMPLE_CALL tuple.  */
      gimple_stmt_iterator gsi;
      call = gimple_build_call_from_tree (*expr_p, fnptrtype);
      notice_special_calls (call);
      gimplify_seq_add_stmt (pre_p, call);
      gsi = gsi_last (*pre_p);
      maybe_fold_stmt (&gsi);
      *expr_p = NULL_TREE;
    }
  else
    /* Remember the original function type.  */
    CALL_EXPR_FN (*expr_p) = build1 (NOP_EXPR, fnptrtype,
				     CALL_EXPR_FN (*expr_p));

  return ret;
}
/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
   rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.

   TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
   condition is true or false, respectively.  If null, we should generate
   our own to skip over the evaluation of this specific expression.

   LOCUS is the source location of the COND_EXPR.

   This function is the tree equivalent of do_jump.

   shortcut_cond_r should only be called by shortcut_cond_expr.  */

static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p,
		 location_t locus)
{
  tree local_label = NULL_TREE;
  tree t, expr = NULL;

  /* OK, it's not a simple case; we need to pull apart the COND_EXPR to
     retain the shortcut semantics.  Just insert the gotos here;
     shortcut_cond_expr will append the real blocks later.  */
  if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
    {
      location_t new_locus;

      /* Turn if (a && b) into

	 if (a); else goto no;
	 if (b) goto yes; else goto no;
	 (no:) */

      if (false_label_p == NULL)
	false_label_p = &local_label;

      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p, locus);
      append_to_statement_list (t, &expr);

      /* Set the source location of the && on the second 'if'.  */
      new_locus = rexpr_location (pred, locus);
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
			   new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      location_t new_locus;

      /* Turn if (a || b) into

	 if (a) goto yes;
	 if (b) goto yes; else goto no;
	 (yes:) */

      if (true_label_p == NULL)
	true_label_p = &local_label;

      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL, locus);
      append_to_statement_list (t, &expr);

      /* Set the source location of the || on the second 'if'.  */
      new_locus = rexpr_location (pred, locus);
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
			   new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR
	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 1)))
	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 2))))
    {
      location_t new_locus;

      /* As long as we're messing with gotos, turn if (a ? b : c) into
	 if (a)
	   if (b) goto yes; else goto no;
	 else
	   if (c) goto yes; else goto no;

	 Don't do this if one of the arms has void type, which can happen
	 in C++ when the arm is throw.  */

      /* Keep the original source location on the first 'if'.  Set the source
	 location of the ? on the second 'if'.  */
      new_locus = rexpr_location (pred, locus);
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
		     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
				      false_label_p, locus),
		     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
				      false_label_p, new_locus));
    }
  else
    {
      /* Base case: a simple predicate becomes a COND_EXPR whose arms are
	 plain jumps to the requested (or freshly created) labels.  */
      expr = build3 (COND_EXPR, void_type_node, pred,
		     build_and_jump (true_label_p),
		     build_and_jump (false_label_p));
      SET_EXPR_LOCATION (expr, locus);
    }

  /* If we created a label locally (the "skip" target), emit it here so
     control falls through past this subexpression.  */
  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}
/* If EXPR is a GOTO_EXPR, return it.  If it is a STATEMENT_LIST, skip
   any of its leading DEBUG_BEGIN_STMTS and recurse on the subsequent
   statement, if it is the last one.  Otherwise, return NULL.  */

static tree
find_goto (tree expr)
{
  if (expr == NULL_TREE)
    return NULL_TREE;

  switch (TREE_CODE (expr))
    {
    case GOTO_EXPR:
      return expr;
    case STATEMENT_LIST:
      break;
    default:
      return NULL_TREE;
    }

  /* Position the iterator past any leading debug markers.  */
  tree_stmt_iterator it = tsi_start (expr);
  while (!tsi_end_p (it) && TREE_CODE (tsi_stmt (it)) == DEBUG_BEGIN_STMT)
    tsi_next (&it);

  /* Only a goto that is the sole remaining statement counts.  */
  if (tsi_one_before_end_p (it))
    return find_goto (tsi_stmt (it));

  return NULL_TREE;
}
/* Same as find_goto, except that it returns NULL if the destination
   is not a LABEL_DECL.  */

static inline tree
find_goto_label (tree expr)
{
  tree g = find_goto (expr);
  if (g == NULL_TREE)
    return NULL_TREE;
  return TREE_CODE (GOTO_DESTINATION (g)) == LABEL_DECL ? g : NULL_TREE;
}
/* Given a conditional expression EXPR with short-circuit boolean
   predicates using TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR, break the
   predicate apart into the equivalent sequence of conditionals.  */

static tree
shortcut_cond_expr (tree expr)
{
  tree pred = TREE_OPERAND (expr, 0);
  tree then_ = TREE_OPERAND (expr, 1);
  tree else_ = TREE_OPERAND (expr, 2);
  tree true_label, false_label, end_label, t;
  tree *true_label_p;
  tree *false_label_p;
  bool emit_end, emit_false, jump_over_else;
  bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
  bool else_se = else_ && TREE_SIDE_EFFECTS (else_);

  /* First do simple transformations.  */
  if (!else_se)
    {
      /* If there is no 'else', turn
	   if (a && b) then c
	 into
	   if (a) if (b) then c.  */
      while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
	{
	  /* Keep the original source location on the first 'if'.  */
	  location_t locus = EXPR_LOC_OR_LOC (expr, input_location);
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  /* Set the source location of the && on the second 'if'.  */
	  if (rexpr_has_location (pred))
	    SET_EXPR_LOCATION (expr, rexpr_location (pred));
	  then_ = shortcut_cond_expr (expr);
	  then_se = then_ && TREE_SIDE_EFFECTS (then_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
	  SET_EXPR_LOCATION (expr, locus);
	}
    }

  if (!then_se)
    {
      /* If there is no 'then', turn
	   if (a || b); else d
	 into
	   if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
	{
	  /* Keep the original source location on the first 'if'.  */
	  location_t locus = EXPR_LOC_OR_LOC (expr, input_location);
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  /* Set the source location of the || on the second 'if'.  */
	  if (rexpr_has_location (pred))
	    SET_EXPR_LOCATION (expr, rexpr_location (pred));
	  else_ = shortcut_cond_expr (expr);
	  else_se = else_ && TREE_SIDE_EFFECTS (else_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
	  SET_EXPR_LOCATION (expr, locus);
	}
    }

  /* If we're done, great.  */
  if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
      && TREE_CODE (pred) != TRUTH_ORIF_EXPR)
    return expr;

  /* Otherwise we need to mess with gotos.  Change
       if (a) c; else d;
     to
       if (a); else goto no;
       c; goto end;
       no: d; end:
     and recursively gimplify the condition.  */

  true_label = false_label = end_label = NULL_TREE;

  /* If our arms just jump somewhere, hijack those labels so we don't
     generate jumps to jumps.  */

  if (tree then_goto = find_goto_label (then_))
    {
      true_label = GOTO_DESTINATION (then_goto);
      then_ = NULL;
      then_se = false;
    }

  if (tree else_goto = find_goto_label (else_))
    {
      false_label = GOTO_DESTINATION (else_goto);
      else_ = NULL;
      else_se = false;
    }

  /* If we aren't hijacking a label for the 'then' branch, it falls through.  */
  if (true_label)
    true_label_p = &true_label;
  else
    true_label_p = NULL;

  /* The 'else' branch also needs a label if it contains interesting code.  */
  if (false_label || else_se)
    false_label_p = &false_label;
  else
    false_label_p = NULL;

  /* If there was nothing else in our arms, just forward the label(s).  */
  if (!then_se && !else_se)
    return shortcut_cond_r (pred, true_label_p, false_label_p,
			    EXPR_LOC_OR_LOC (expr, input_location));

  /* If our last subexpression already has a terminal label, reuse it.  */
  if (else_se)
    t = expr_last (else_);
  else if (then_se)
    t = expr_last (then_);
  else
    t = NULL;
  if (t && TREE_CODE (t) == LABEL_EXPR)
    end_label = LABEL_EXPR_LABEL (t);

  /* If we don't care about jumping to the 'else' branch, jump to the end
     if the condition is false.  */
  if (!false_label_p)
    false_label_p = &end_label;

  /* We only want to emit these labels if we aren't hijacking them.  */
  emit_end = (end_label == NULL_TREE);
  emit_false = (false_label == NULL_TREE);

  /* We only emit the jump over the else clause if we have to--if the
     then clause may fall through.  Otherwise we can wind up with a
     useless jump and a useless label at the end of gimplified code,
     which will cause us to think that this conditional as a whole
     falls through even if it doesn't.  If we then inline a function
     which ends with such a condition, that can cause us to issue an
     inappropriate warning about control reaching the end of a
     non-void function.  */
  jump_over_else = block_may_fallthru (then_);

  pred = shortcut_cond_r (pred, true_label_p, false_label_p,
			  EXPR_LOC_OR_LOC (expr, input_location));

  expr = NULL;
  append_to_statement_list (pred, &expr);

  append_to_statement_list (then_, &expr);
  if (else_se)
    {
      if (jump_over_else)
	{
	  tree last = expr_last (expr);
	  t = build_and_jump (&end_label);
	  /* Give the jump-over-else the location of the last statement of
	     the then arm, if any.  */
	  if (rexpr_has_location (last))
	    SET_EXPR_LOCATION (t, rexpr_location (last));
	  append_to_statement_list (t, &expr);
	}
      if (emit_false)
	{
	  t = build1 (LABEL_EXPR, void_type_node, false_label);
	  append_to_statement_list (t, &expr);
	}
      append_to_statement_list (else_, &expr);
    }
  if (emit_end && end_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, end_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}
/* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE.
   Truth operators and comparisons get their type retagged in place;
   anything else is wrapped in a conversion to boolean_type_node.  */

tree
gimple_boolify (tree expr)
{
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);

  if (TREE_CODE (expr) == NE_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == CALL_EXPR
      && integer_zerop (TREE_OPERAND (expr, 1)))
    {
      tree call = TREE_OPERAND (expr, 0);
      tree fn = get_callee_fndecl (call);

      /* For __builtin_expect ((long) (x), y) recurse into x as well
	 if x is truth_value_p.  */
      if (fn
	  && fndecl_built_in_p (fn, BUILT_IN_EXPECT)
	  && call_expr_nargs (call) == 2)
	{
	  tree arg = CALL_EXPR_ARG (call, 0);
	  if (arg)
	    {
	      /* Strip a conversion the FE inserted to match the builtin's
		 long-typed parameter.  */
	      if (TREE_CODE (arg) == NOP_EXPR
		  && TREE_TYPE (arg) == TREE_TYPE (call))
		arg = TREE_OPERAND (arg, 0);
	      if (truth_value_p (TREE_CODE (arg)))
		{
		  arg = gimple_boolify (arg);
		  CALL_EXPR_ARG (call, 0)
		    = fold_convert_loc (loc, TREE_TYPE (call), arg);
		}
	    }
	}
    }

  switch (TREE_CODE (expr))
    {
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Also boolify the arguments of truth exprs.  */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      /* FALLTHRU */

    case TRUTH_NOT_EXPR:
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

      /* These expressions always produce boolean results.  */
      if (TREE_CODE (type) != BOOLEAN_TYPE)
	TREE_TYPE (expr) = boolean_type_node;
      return expr;

    case ANNOTATE_EXPR:
      switch ((enum annot_expr_kind) TREE_INT_CST_LOW (TREE_OPERAND (expr, 1)))
	{
	case annot_expr_ivdep_kind:
	case annot_expr_unroll_kind:
	case annot_expr_no_vector_kind:
	case annot_expr_vector_kind:
	case annot_expr_parallel_kind:
	  /* Loop annotations wrap the condition; boolify the wrapped
	     expression and retag the annotation itself.  */
	  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
	  if (TREE_CODE (type) != BOOLEAN_TYPE)
	    TREE_TYPE (expr) = boolean_type_node;
	  return expr;
	default:
	  gcc_unreachable ();
	}

    default:
      if (COMPARISON_CLASS_P (expr))
	{
	  /* These expressions always produce boolean results.  */
	  if (TREE_CODE (type) != BOOLEAN_TYPE)
	    TREE_TYPE (expr) = boolean_type_node;
	  return expr;
	}
      /* Other expressions that get here must have boolean values, but
	 might need to be converted to the appropriate mode.  */
      if (TREE_CODE (type) == BOOLEAN_TYPE)
	return expr;
      return fold_convert_loc (loc, boolean_type_node, expr);
    }
}
/* Given a conditional expression *EXPR_P without side effects, gimplify
   its operands.  New statements are inserted to PRE_P.  Returns the
   worst (minimum) gimplify_status seen over the three operands.  */

static enum gimplify_status
gimplify_pure_cond_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree whole = *expr_p;
  tree cond = gimple_boolify (COND_EXPR_COND (whole));

  /* We need to handle && and || specially, as their gimplification
     creates pure cond_expr, thus leading to an infinite cycle otherwise:
     demote them to their non-short-circuit counterparts.  */
  switch (TREE_CODE (cond))
    {
    case TRUTH_ANDIF_EXPR:
      TREE_SET_CODE (cond, TRUTH_AND_EXPR);
      break;
    case TRUTH_ORIF_EXPR:
      TREE_SET_CODE (cond, TRUTH_OR_EXPR);
      break;
    default:
      break;
    }

  enum gimplify_status status
    = gimplify_expr (&cond, pre_p, NULL, is_gimple_condexpr, fb_rvalue);
  COND_EXPR_COND (*expr_p) = cond;

  enum gimplify_status arm_status
    = gimplify_expr (&COND_EXPR_THEN (whole), pre_p, NULL,
		     is_gimple_val, fb_rvalue);
  status = MIN (status, arm_status);
  arm_status = gimplify_expr (&COND_EXPR_ELSE (whole), pre_p, NULL,
			      is_gimple_val, fb_rvalue);

  return MIN (status, arm_status);
}
/* Return true if evaluating EXPR could trap.
   EXPR is GENERIC, while tree_could_trap_p can be called
   only on GIMPLE, hence the manual recursion over operands here.  */

bool
generic_expr_could_trap_p (tree expr)
{
  /* A missing expression or a simple GIMPLE value never traps.  */
  if (expr == NULL_TREE || is_gimple_val (expr))
    return false;

  /* Conservatively treat non-expressions as trapping, and defer to
     tree_could_trap_p for the node itself.  */
  if (!EXPR_P (expr) || tree_could_trap_p (expr))
    return true;

  /* Otherwise the expression traps iff any of its operands does.  */
  unsigned len = TREE_OPERAND_LENGTH (expr);
  for (unsigned ix = 0; ix < len; ix++)
    if (generic_expr_could_trap_p (TREE_OPERAND (expr, ix)))
      return true;

  return false;
}
/* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
   into

     if (p)			if (p)
       t1 = a;			  a;
     else		or	else
       t1 = b;			  b;
     t1;

   The second form is used when *EXPR_P is of type void.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_cond_expr (tree *expr_p, gimple_seq *pre_p, fallback_t fallback)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);
  tree tmp, arm1, arm2;
  enum gimplify_status ret;
  tree label_true, label_false, label_cont;
  bool have_then_clause_p, have_else_clause_p;
  gcond *cond_stmt;
  enum tree_code pred_code;
  gimple_seq seq = NULL;

  /* If this COND_EXPR has a value, copy the values into a temporary within
     the arms.  */
  if (!VOID_TYPE_P (type))
    {
      tree then_ = TREE_OPERAND (expr, 1), else_ = TREE_OPERAND (expr, 2);
      tree result;

      /* If either an rvalue is ok or we do not require an lvalue, create the
	 temporary.  But we cannot do that if the type is addressable.  */
      if (((fallback & fb_rvalue) || !(fallback & fb_lvalue))
	  && !TREE_ADDRESSABLE (type))
	{
	  if (gimplify_ctxp->allow_rhs_cond_expr
	      /* If either branch has side effects or could trap, it can't be
		 evaluated unconditionally.  */
	      && !TREE_SIDE_EFFECTS (then_)
	      && !generic_expr_could_trap_p (then_)
	      && !TREE_SIDE_EFFECTS (else_)
	      && !generic_expr_could_trap_p (else_))
	    return gimplify_pure_cond_expr (expr_p, pre_p);

	  tmp = create_tmp_var (type, "iftmp");
	  result = tmp;
	}

      /* Otherwise, only create and copy references to the values.  */
      else
	{
	  type = build_pointer_type (type);

	  if (!VOID_TYPE_P (TREE_TYPE (then_)))
	    then_ = build_fold_addr_expr_loc (loc, then_);

	  if (!VOID_TYPE_P (TREE_TYPE (else_)))
	    else_ = build_fold_addr_expr_loc (loc, else_);

	  expr
	    = build3 (COND_EXPR, type, TREE_OPERAND (expr, 0), then_, else_);

	  tmp = create_tmp_var (type, "iftmp");
	  result = build_simple_mem_ref_loc (loc, tmp);
	}

      /* Build the new then clause, `tmp = then_;'.  But don't build the
	 assignment if the value is void; in C++ it can be if it's a throw.  */
      if (!VOID_TYPE_P (TREE_TYPE (then_)))
	TREE_OPERAND (expr, 1) = build2 (INIT_EXPR, type, tmp, then_);

      /* Similarly, build the new else clause, `tmp = else_;'.  */
      if (!VOID_TYPE_P (TREE_TYPE (else_)))
	TREE_OPERAND (expr, 2) = build2 (INIT_EXPR, type, tmp, else_);

      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);

      /* Move the COND_EXPR to the prequeue.  */
      gimplify_stmt (&expr, pre_p);

      *expr_p = result;
      return GS_ALL_DONE;
    }

  /* Remove any COMPOUND_EXPR so the following cases will be caught.  */
  STRIP_TYPE_NOPS (TREE_OPERAND (expr, 0));
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == COMPOUND_EXPR)
    gimplify_compound_expr (&TREE_OPERAND (expr, 0), pre_p, true);

  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);

      if (expr != *expr_p)
	{
	  *expr_p = expr;

	  /* We can't rely on gimplify_expr to re-gimplify the expanded
	     form properly, as cleanups might cause the target labels to be
	     wrapped in a TRY_FINALLY_EXPR.  To prevent that, we need to
	     set up a conditional context.  */
	  gimple_push_condition ();
	  gimplify_stmt (expr_p, &seq);
	  gimple_pop_condition (pre_p);
	  gimple_seq_add_seq (pre_p, seq);

	  return GS_ALL_DONE;
	}
    }

  /* Now do the normal gimplification.  */

  /* Gimplify condition.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
		       is_gimple_condexpr_for_cond, fb_rvalue);
  if (ret == GS_ERROR)
    return GS_ERROR;
  gcc_assert (TREE_OPERAND (expr, 0) != NULL_TREE);

  gimple_push_condition ();

  have_then_clause_p = have_else_clause_p = false;
  label_true = find_goto_label (TREE_OPERAND (expr, 1));
  if (label_true
      && DECL_CONTEXT (GOTO_DESTINATION (label_true)) == current_function_decl
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !rexpr_has_location (label_true)
	  || EXPR_LOCATION (expr) == rexpr_location (label_true)))
    {
      have_then_clause_p = true;
      label_true = GOTO_DESTINATION (label_true);
    }
  else
    label_true = create_artificial_label (UNKNOWN_LOCATION);
  label_false = find_goto_label (TREE_OPERAND (expr, 2));
  if (label_false
      && DECL_CONTEXT (GOTO_DESTINATION (label_false)) == current_function_decl
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !rexpr_has_location (label_false)
	  || EXPR_LOCATION (expr) == rexpr_location (label_false)))
    {
      have_else_clause_p = true;
      label_false = GOTO_DESTINATION (label_false);
    }
  else
    label_false = create_artificial_label (UNKNOWN_LOCATION);

  gimple_cond_get_ops_from_tree (COND_EXPR_COND (expr), &pred_code, &arm1,
				 &arm2);
  cond_stmt = gimple_build_cond (pred_code, arm1, arm2, label_true,
				 label_false);
  gimple_set_no_warning (cond_stmt, TREE_NO_WARNING (COND_EXPR_COND (expr)));
  gimplify_seq_add_stmt (&seq, cond_stmt);
  gimple_stmt_iterator gsi = gsi_last (seq);
  maybe_fold_stmt (&gsi);

  label_cont = NULL_TREE;
  if (!have_then_clause_p)
    {
      /* For if (...) {} else { code; } put label_true after
	 the else block.  */
      if (TREE_OPERAND (expr, 1) == NULL_TREE
	  && !have_else_clause_p
	  && TREE_OPERAND (expr, 2) != NULL_TREE)
	label_cont = label_true;
      else
	{
	  gimplify_seq_add_stmt (&seq, gimple_build_label (label_true));
	  have_then_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 1), &seq);
	  /* For if (...) { code; } else {} or
	     if (...) { code; } else goto label; or
	     if (...) { code; return; } else { ... }
	     label_cont isn't needed.  */
	  if (!have_else_clause_p
	      && TREE_OPERAND (expr, 2) != NULL_TREE
	      && gimple_seq_may_fallthru (seq))
	    {
	      gimple *g;
	      label_cont = create_artificial_label (UNKNOWN_LOCATION);

	      g = gimple_build_goto (label_cont);

	      /* GIMPLE_COND's are very low level; they have embedded
		 gotos.  This particular embedded goto should not be marked
		 with the location of the original COND_EXPR, as it would
		 correspond to the COND_EXPR's condition, not the ELSE or the
		 THEN arms.  To avoid marking it with the wrong location, flag
		 it as "no location".  */
	      gimple_set_do_not_emit_location (g);

	      gimplify_seq_add_stmt (&seq, g);
	    }
	}
    }
  if (!have_else_clause_p)
    {
      gimplify_seq_add_stmt (&seq, gimple_build_label (label_false));
      have_else_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 2), &seq);
    }
  if (label_cont)
    gimplify_seq_add_stmt (&seq, gimple_build_label (label_cont));

  gimple_pop_condition (pre_p);
  gimple_seq_add_seq (pre_p, seq);

  if (ret == GS_ERROR)
    ; /* Do nothing.  */
  else if (have_then_clause_p || have_else_clause_p)
    ret = GS_ALL_DONE;
  else
    {
      /* Both arms are empty; replace the COND_EXPR with its predicate.  */
      expr = TREE_OPERAND (expr, 0);
      gimplify_stmt (&expr, pre_p);
    }

  *expr_p = NULL;
  return ret;
}
/* Prepare the node pointed to by EXPR_P, an is_gimple_addressable expression,
   to be marked addressable.

   We cannot rely on such an expression being directly markable if a temporary
   has been created by the gimplification.  In this case, we create another
   temporary and initialize it with a copy, which will become a store after we
   mark it addressable.  This can happen if the front-end passed us something
   that it could not mark addressable yet, like a Fortran pass-by-reference
   parameter (int) floatvar.  */

static void
prepare_gimple_addressable (tree *expr_p, gimple_seq *seq_p)
{
  /* Walk down to the base object underneath any component references.  */
  tree *base_p = expr_p;
  while (handled_component_p (*base_p))
    base_p = &TREE_OPERAND (*base_p, 0);

  if (!is_gimple_reg (*base_p))
    return;

  /* The base is register-like; replace it with an addressable copy.
     Do not allow an SSA name as the temporary.  */
  tree copy = get_initialized_tmp_var (*base_p, seq_p, NULL, false);
  DECL_GIMPLE_REG_P (copy) = 0;
  *base_p = copy;
}
/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memcpy.  SIZE is the number of bytes to copy.
   If WANT_VALUE, *EXPR_P becomes a dereference of the destination
   pointer; otherwise it is cleared.  Statements go to SEQ_P.  */

static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value,
				gimple_seq *seq_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree dest = TREE_OPERAND (*expr_p, 0);
  tree src = TREE_OPERAND (*expr_p, 1);

  /* Mark the RHS addressable.  Beware that it may not be possible to do so
     directly if a temporary has been created by the gimplification.  */
  prepare_gimple_addressable (&src, seq_p);
  mark_addressable (src);
  tree src_addr = build_fold_addr_expr_loc (loc, src);
  gimplify_arg (&src_addr, seq_p, loc);

  mark_addressable (dest);
  tree dest_addr = build_fold_addr_expr_loc (loc, dest);
  gimplify_arg (&dest_addr, seq_p, loc);

  tree memcpy_fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
  gcall *call_stmt
    = gimple_build_call (memcpy_fn, 3, dest_addr, src_addr, size);

  if (want_value)
    {
      /* tmp = memcpy (); the value of the assignment is *tmp.  */
      tree lhs = create_tmp_var (TREE_TYPE (dest_addr));
      gimple_call_set_lhs (call_stmt, lhs);
      gimplify_seq_add_stmt (seq_p, call_stmt);
      *expr_p = build_simple_mem_ref (lhs);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, call_stmt);
  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memset.  In this case we know that the RHS is
   a CONSTRUCTOR with an empty element list, so the destination is
   simply zero-filled for SIZE bytes.  */

static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value,
				gimple_seq *seq_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);

  /* Assert our assumptions, to abort instead of producing wrong code
     silently if they are not met.  Beware that the RHS CONSTRUCTOR might
     not be immediately exposed.  */
  tree rhs = TREE_OPERAND (*expr_p, 1);
  if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
    rhs = TREE_OPERAND (rhs, 0);
  gcc_assert (TREE_CODE (rhs) == CONSTRUCTOR
	      && vec_safe_is_empty (CONSTRUCTOR_ELTS (rhs)));

  /* Now proceed: take the destination's address as the first argument.  */
  tree dest = TREE_OPERAND (*expr_p, 0);
  tree dest_addr = build_fold_addr_expr_loc (loc, dest);
  gimplify_arg (&dest_addr, seq_p, loc);

  tree memset_fn = builtin_decl_implicit (BUILT_IN_MEMSET);
  gcall *call_stmt
    = gimple_build_call (memset_fn, 3, dest_addr, integer_zero_node, size);

  if (want_value)
    {
      /* tmp = memset (); the value of the assignment is *tmp.  */
      tree lhs = create_tmp_var (TREE_TYPE (dest_addr));
      gimple_call_set_lhs (call_stmt, lhs);
      gimplify_seq_add_stmt (seq_p, call_stmt);
      *expr_p = build1 (INDIRECT_REF, TREE_TYPE (dest), lhs);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, call_stmt);
  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* A subroutine of gimplify_init_ctor_preeval.  Called via walk_tree,
   determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
   assignment.  Return non-null if we detect a potential overlap.  */

/* Shared state for the gimplify_init_ctor_preeval_1 walk_tree callback.  */
struct gimplify_init_ctor_preeval_data
{
  /* The base decl of the lhs object.  May be NULL, in which case we
     have to assume the lhs is indirect.  */
  tree lhs_base_decl;

  /* The alias set of the lhs object.  */
  alias_set_type lhs_alias_set;
};
/* walk_tree callback: return *TP if it might overlap the lhs described
   by the gimplify_init_ctor_preeval_data passed in XDATA, NULL otherwise.
   Clears *WALK_SUBTREES under types and decls.  */

static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
  struct gimplify_init_ctor_preeval_data *pd
    = (struct gimplify_init_ctor_preeval_data *) xdata;
  tree node = *tp;

  /* An unknown (NULL) or addressable lhs base may be reached through
     pointers; a known non-addressable base cannot be.  */
  bool lhs_may_alias
    = (pd->lhs_base_decl == NULL_TREE
       || TREE_ADDRESSABLE (pd->lhs_base_decl));

  /* If we find the base object, obviously we have overlap.  */
  if (node == pd->lhs_base_decl)
    return node;

  /* If the constructor component is indirect, determine if we have a
     potential overlap with the lhs.  The only bits of information we
     have to go on at this point are addressability and alias sets.  */
  if ((INDIRECT_REF_P (node) || TREE_CODE (node) == MEM_REF)
      && lhs_may_alias
      && alias_sets_conflict_p (pd->lhs_alias_set, get_alias_set (node)))
    return node;

  /* If the constructor component is a call, determine if it can hide a
     potential overlap with the lhs through an INDIRECT_REF like above.
     ??? Ugh - this is completely broken.  In fact this whole analysis
     doesn't look conservative.  */
  if (TREE_CODE (node) == CALL_EXPR)
    {
      tree fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (node)));

      for (tree parm = TYPE_ARG_TYPES (fntype); parm; parm = TREE_CHAIN (parm))
	if (POINTER_TYPE_P (TREE_VALUE (parm))
	    && lhs_may_alias
	    && alias_sets_conflict_p (pd->lhs_alias_set,
				      get_alias_set
					(TREE_TYPE (TREE_VALUE (parm)))))
	  return node;
    }

  if (IS_TYPE_OR_DECL_P (node))
    *walk_subtrees = 0;
  return NULL;
}
/* A subroutine of gimplify_init_constructor.  Pre-evaluate EXPR,
   force values that overlap with the lhs (as described by *DATA)
   into temporaries.

   Side effects are emitted into PRE_P/POST_P; on gimplification
   failure *EXPR_P is set to NULL.  */
static void
gimplify_init_ctor_preeval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			    struct gimplify_init_ctor_preeval_data *data)
{
  enum gimplify_status one;
  /* If the value is constant, then there's nothing to pre-evaluate.  */
  if (TREE_CONSTANT (*expr_p))
    {
      /* Ensure it does not have side effects, it might contain a reference to
	 the object we're initializing.  */
      gcc_assert (!TREE_SIDE_EFFECTS (*expr_p));
      return;
    }
  /* If the type has non-trivial constructors, we can't pre-evaluate.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
    return;
  /* Recurse for nested constructors.  */
  if (TREE_CODE (*expr_p) == CONSTRUCTOR)
    {
      unsigned HOST_WIDE_INT ix;
      constructor_elt *ce;
      vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (*expr_p);
      FOR_EACH_VEC_SAFE_ELT (v, ix, ce)
	gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);
      return;
    }
  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);
  /* Gimplify the constructor element to something appropriate for the rhs
     of a MODIFY_EXPR.  Given that we know the LHS is an aggregate, we know
     the gimplifier will consider this a store to memory.  Doing this
     gimplification now means that we won't have to deal with complicated
     language-specific trees, nor trees like SAVE_EXPR that can induce
     exponential search behavior.  */
  one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
  if (one == GS_ERROR)
    {
      *expr_p = NULL;
      return;
    }
  /* If we gimplified to a bare decl, we can be sure that it doesn't overlap
     with the lhs, since "a = { .x=a }" doesn't make sense.  This will
     always be true for all scalars, since is_gimple_mem_rhs insists on a
     temporary variable for them.  */
  if (DECL_P (*expr_p))
    return;
  /* If this is of variable size, we have no choice but to assume it doesn't
     overlap since we can't make a temporary for it.  */
  if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
    return;
  /* Otherwise, we must search for overlap ...  */
  if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
    return;
  /* ... and if found, force the value into a temporary.  */
  *expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
/* A subroutine of gimplify_init_ctor_eval.  Create a loop for
   a RANGE_EXPR in a CONSTRUCTOR for an array.

      var = lower;
    loop_entry:
      object[var] = value;
      if (var == upper)
	goto loop_exit;
      var = var + 1;
      goto loop_entry;
    loop_exit:

   We increment var _after_ the loop exit check because we might otherwise
   fail if upper == TYPE_MAX_VALUE (type for upper).

   Note that we never have to deal with SAVE_EXPRs here, because this has
   already been taken care of for us, in gimplify_init_ctor_preeval().  */
static void gimplify_init_ctor_eval (tree, vec<constructor_elt, va_gc> *,
				     gimple_seq *, bool);

/* Emit into PRE_P the loop sketched above, assigning VALUE to every
   element of OBJECT (whose elements have type ARRAY_ELT_TYPE) in the
   inclusive index range [LOWER, UPPER].  CLEARED is forwarded to the
   recursive gimplify_init_ctor_eval call for nested constructors.  */
static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
			       tree value, tree array_elt_type,
			       gimple_seq *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label, fall_thru_label;
  tree var, var_type, cref, tmp;
  loop_entry_label = create_artificial_label (UNKNOWN_LOCATION);
  loop_exit_label = create_artificial_label (UNKNOWN_LOCATION);
  fall_thru_label = create_artificial_label (UNKNOWN_LOCATION);
  /* Create and initialize the index variable.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type);
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, lower));
  /* Add the loop entry label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_entry_label));
  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
		 var, NULL_TREE, NULL_TREE);
  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */
  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
			     pre_p, cleared);
  else
    {
      if (gimplify_expr (&value, pre_p, NULL, is_gimple_val, fb_rvalue)
	  != GS_ERROR)
	gimplify_seq_add_stmt (pre_p, gimple_build_assign (cref, value));
    }
  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_seq_add_stmt (pre_p,
			 gimple_build_cond (EQ_EXPR, var, upper,
					    loop_exit_label, fall_thru_label));
  gimplify_seq_add_stmt (pre_p, gimple_build_label (fall_thru_label));
  /* Otherwise, increment the index var...  */
  tmp = build2 (PLUS_EXPR, var_type, var,
		fold_convert (var_type, integer_one_node));
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, tmp));
  /* ...and jump back to the loop entry.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_goto (loop_entry_label));
  /* Add the loop exit label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_exit_label));
}
/* Return true if FDECL is a FIELD_DECL whose declared size is known
   and is zero bits (e.g. a zero-length array member).  */
static bool
zero_sized_field_decl (const_tree fdecl)
{
  if (TREE_CODE (fdecl) != FIELD_DECL)
    return false;
  const_tree size = DECL_SIZE (fdecl);
  return size != NULL_TREE && integer_zerop (size);
}
/* Return true if TYPE is an aggregate whose size is known and is
   zero bits.  */
static bool
zero_sized_type (const_tree type)
{
  if (!AGGREGATE_TYPE_P (type))
    return false;
  const_tree size = TYPE_SIZE (type);
  return size != NULL_TREE && integer_zerop (size);
}
/* A subroutine of gimplify_init_constructor.  Generate individual
   MODIFY_EXPRs for a CONSTRUCTOR.  OBJECT is the LHS against which the
   assignments should happen.  ELTS is the CONSTRUCTOR_ELTS of the
   CONSTRUCTOR.  CLEARED is true if the entire LHS object has been
   zeroed first.  The generated statements are appended to PRE_P.  */
static void
gimplify_init_ctor_eval (tree object, vec<constructor_elt, va_gc> *elts,
			 gimple_seq *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;
  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));
  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref;
      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
	continue;
      /* Zero stores are redundant when the whole object was cleared.  */
      if (cleared && initializer_zerop (value))
	continue;
      /* ??? Here's to hoping the front end fills in all of the indices,
	 so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);
      /* Skip zero-sized fields, unless value has side-effects.  This can
	 happen with calls to functions returning a zero-sized type, which
	 we shouldn't discard.  As a number of downstream passes don't
	 expect sets of zero-sized fields, we rely on the gimplification of
	 the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
	continue;
      /* If we have a RANGE_EXPR, we have to build a loop to assign the
	 whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lower = TREE_OPERAND (purpose, 0);
	  tree upper = TREE_OPERAND (purpose, 1);
	  /* If the lower bound is equal to upper, just treat it as if
	     upper was the index.  */
	  if (simple_cst_equal (lower, upper))
	    purpose = upper;
	  else
	    {
	      gimplify_init_ctor_eval_range (object, lower, upper, value,
					     array_elt_type, pre_p, cleared);
	      continue;
	    }
	}
      if (array_elt_type)
	{
	  /* Do not use bitsizetype for ARRAY_REF indices.  */
	  if (TYPE_DOMAIN (TREE_TYPE (object)))
	    purpose
	      = fold_convert (TREE_TYPE (TYPE_DOMAIN (TREE_TYPE (object))),
			      purpose);
	  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
			 purpose, NULL_TREE, NULL_TREE);
	}
      else
	{
	  gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
	  cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
			 unshare_expr (object), purpose, NULL_TREE);
	}
      /* Recurse for nested aggregate constructors; vectors keep their
	 CONSTRUCTOR form through gimple.  */
      if (TREE_CODE (value) == CONSTRUCTOR
	  && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
	gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
				 pre_p, cleared);
      else
	{
	  tree init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
	  gimplify_and_add (init, pre_p);
	  ggc_free (init);
	}
    }
}
/* Return the appropriate RHS predicate for this LHS: register LHSs
   need a register-valued RHS, memory LHSs accept a memory RHS.  */
gimple_predicate
rhs_predicate_for (tree lhs)
{
  return (is_gimple_reg (lhs)
	  ? is_gimple_reg_rhs_or_call
	  : is_gimple_mem_rhs_or_call);
}
/* Return the initial guess for an appropriate RHS predicate for this
   LHS, before the LHS has been gimplified.  Decides on the LHS's type
   alone since the LHS itself is not yet in gimple form.  */
static gimple_predicate
initial_rhs_predicate_for (tree lhs)
{
  return (is_gimple_reg_type (TREE_TYPE (lhs))
	  ? is_gimple_reg_rhs_or_call
	  : is_gimple_mem_rhs_or_call);
}
/* Gimplify a C99 compound literal expression.  This just means adding
   the DECL_EXPR before the current statement and using its anonymous
   decl instead.

   GIMPLE_TEST_F and FALLBACK describe what the caller will accept, so
   we can sometimes substitute the literal's initializer directly.  */
static enum gimplify_status
gimplify_compound_literal_expr (tree *expr_p, gimple_seq *pre_p,
				bool (*gimple_test_f) (tree),
				fallback_t fallback)
{
  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (*expr_p);
  tree decl = DECL_EXPR_DECL (decl_s);
  tree init = DECL_INITIAL (decl);
  /* Mark the decl as addressable if the compound literal
     expression is addressable now, otherwise it is marked too late
     after we gimplify the initialization expression.  */
  if (TREE_ADDRESSABLE (*expr_p))
    TREE_ADDRESSABLE (decl) = 1;
  /* Otherwise, if we don't need an lvalue and have a literal directly
     substitute it.  Check if it matches the gimple predicate, as
     otherwise we'd generate a new temporary, and we can as well just
     use the decl we already have.  */
  else if (!TREE_ADDRESSABLE (decl)
	   && !TREE_THIS_VOLATILE (decl)
	   && init
	   && (fallback & fb_lvalue) == 0
	   && gimple_test_f (init))
    {
      *expr_p = init;
      return GS_OK;
    }
  /* Preliminarily mark non-addressed complex variables as eligible
     for promotion to gimple registers.  We'll transform their uses
     as we find them.  */
  if ((TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE
       || TREE_CODE (TREE_TYPE (decl)) == VECTOR_TYPE)
      && !TREE_THIS_VOLATILE (decl)
      && !needs_to_live_in_memory (decl))
    DECL_GIMPLE_REG_P (decl) = 1;
  /* If the decl is not addressable, then it is being used in some
     expression or on the right hand side of a statement, and it can
     be put into a readonly data section.  */
  if (!TREE_ADDRESSABLE (decl) && (fallback & fb_lvalue) == 0)
    TREE_READONLY (decl) = 1;
  /* This decl isn't mentioned in the enclosing block, so add it to the
     list of temps.  FIXME it seems a bit of a kludge to say that
     anonymous artificial vars aren't pushed, but everything else is.  */
  if (DECL_NAME (decl) == NULL_TREE && !DECL_SEEN_IN_BIND_EXPR_P (decl))
    gimple_add_tmp_var (decl);
  gimplify_and_add (decl_s, pre_p);
  *expr_p = decl;
  return GS_OK;
}
/* Optimize embedded COMPOUND_LITERAL_EXPRs within a CONSTRUCTOR,
   return a new CONSTRUCTOR if something changed.

   The input is not modified: a copy is made lazily (copy-on-write)
   only when the first replacement is found, so the unchanged case
   returns ORIG_CTOR itself.  */
static tree
optimize_compound_literals_in_ctor (tree orig_ctor)
{
  tree ctor = orig_ctor;
  vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (ctor);
  unsigned int idx, num = vec_safe_length (elts);
  for (idx = 0; idx < num; idx++)
    {
      tree value = (*elts)[idx].value;
      tree newval = value;
      if (TREE_CODE (value) == CONSTRUCTOR)
	newval = optimize_compound_literals_in_ctor (value);
      else if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR)
	{
	  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (value);
	  tree decl = DECL_EXPR_DECL (decl_s);
	  tree init = DECL_INITIAL (decl);
	  /* Only replace the literal by its initializer when neither
	     the literal nor its decl can have their address taken.  */
	  if (!TREE_ADDRESSABLE (value)
	      && !TREE_ADDRESSABLE (decl)
	      && init
	      && TREE_CODE (init) == CONSTRUCTOR)
	    newval = optimize_compound_literals_in_ctor (init);
	}
      if (newval == value)
	continue;
      /* First change: make a private copy of the ctor and its vector
	 so the original remains unmodified.  */
      if (ctor == orig_ctor)
	{
	  ctor = copy_node (orig_ctor);
	  CONSTRUCTOR_ELTS (ctor) = vec_safe_copy (elts);
	  elts = CONSTRUCTOR_ELTS (ctor);
	}
      (*elts)[idx].value = newval;
    }
  return ctor;
}
/* A subroutine of gimplify_modify_expr.  Break out elements of a
   CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.

   Note that we still need to clear any elements that don't have explicit
   initializers, so if not all elements are initialized we keep the
   original MODIFY_EXPR, we just remove all of the constructor elements.

   If NOTIFY_TEMP_CREATION is true, do not gimplify, just return
   GS_ERROR if we would have to create a temporary when gimplifying
   this constructor.  Otherwise, return GS_OK.

   If NOTIFY_TEMP_CREATION is false, just do the gimplification.  */
static enum gimplify_status
gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			   bool want_value, bool notify_temp_creation)
{
  tree object, ctor, type;
  enum gimplify_status ret;
  vec<constructor_elt, va_gc> *elts;
  gcc_assert (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == CONSTRUCTOR);
  if (!notify_temp_creation)
    {
      ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			   is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
    }
  object = TREE_OPERAND (*expr_p, 0);
  ctor = TREE_OPERAND (*expr_p, 1)
    = optimize_compound_literals_in_ctor (TREE_OPERAND (*expr_p, 1));
  type = TREE_TYPE (ctor);
  elts = CONSTRUCTOR_ELTS (ctor);
  ret = GS_ALL_DONE;
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
	struct gimplify_init_ctor_preeval_data preeval_data;
	HOST_WIDE_INT num_ctor_elements, num_nonzero_elements;
	HOST_WIDE_INT num_unique_nonzero_elements;
	bool cleared, complete_p, valid_const_initializer;
	/* Use readonly data for initializers of this or smaller size
	   regardless of the num_nonzero_elements / num_unique_nonzero_elements
	   ratio.  */
	const HOST_WIDE_INT min_unique_size = 64;
	/* If num_nonzero_elements / num_unique_nonzero_elements ratio
	   is smaller than this, use readonly data.  */
	const int unique_nonzero_ratio = 8;
	/* Aggregate types must lower constructors to initialization of
	   individual elements.  The exception is that a CONSTRUCTOR node
	   with no elements indicates zero-initialization of the whole.  */
	if (vec_safe_is_empty (elts))
	  {
	    if (notify_temp_creation)
	      return GS_OK;
	    break;
	  }
	/* Fetch information about the constructor to direct later processing.
	   We might want to make static versions of it in various cases, and
	   can only do so if it known to be a valid constant initializer.  */
	valid_const_initializer
	  = categorize_ctor_elements (ctor, &num_nonzero_elements,
				      &num_unique_nonzero_elements,
				      &num_ctor_elements, &complete_p);
	/* If a const aggregate variable is being initialized, then it
	   should never be a lose to promote the variable to be static.  */
	if (valid_const_initializer
	    && num_nonzero_elements > 1
	    && TREE_READONLY (object)
	    && VAR_P (object)
	    && !DECL_REGISTER (object)
	    && (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object))
	    /* For ctors that have many repeated nonzero elements
	       represented through RANGE_EXPRs, prefer initializing
	       those through runtime loops over copies of large amounts
	       of data from readonly data section.  */
	    && (num_unique_nonzero_elements
		> num_nonzero_elements / unique_nonzero_ratio
		|| ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)
		    <= (unsigned HOST_WIDE_INT) min_unique_size)))
	  {
	    if (notify_temp_creation)
	      return GS_ERROR;
	    DECL_INITIAL (object) = ctor;
	    TREE_STATIC (object) = 1;
	    if (!DECL_NAME (object))
	      DECL_NAME (object) = create_tmp_var_name ("C");
	    walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);
	    /* ??? C++ doesn't automatically append a .<number> to the
	       assembler name, and even when it does, it looks at FE private
	       data structures to figure out what that number should be,
	       which are not set for this variable.  I suppose this is
	       important for local statics for inline functions, which aren't
	       "local" in the object file sense.  So in order to get a unique
	       TU-local symbol, we must invoke the lhd version now.  */
	    lhd_set_decl_assembler_name (object);
	    *expr_p = NULL_TREE;
	    break;
	  }
	/* If there are "lots" of initialized elements, even discounting
	   those that are not address constants (and thus *must* be
	   computed at runtime), then partition the constructor into
	   constant and non-constant parts.  Block copy the constant
	   parts in, then generate code for the non-constant parts.  */
	/* TODO.  There's code in cp/typeck.c to do this.  */
	if (int_size_in_bytes (TREE_TYPE (ctor)) < 0)
	  /* store_constructor will ignore the clearing of variable-sized
	     objects.  Initializers for such objects must explicitly set
	     every field that needs to be set.  */
	  cleared = false;
	else if (!complete_p)
	  /* If the constructor isn't complete, clear the whole object
	     beforehand, unless CONSTRUCTOR_NO_CLEARING is set on it.
	     ??? This ought not to be needed.  For any element not present
	     in the initializer, we should simply set them to zero.  Except
	     we'd need to *find* the elements that are not present, and that
	     requires trickery to avoid quadratic compile-time behavior in
	     large cases or excessive memory use in small cases.  */
	  cleared = !CONSTRUCTOR_NO_CLEARING (ctor);
	else if (num_ctor_elements - num_nonzero_elements
		 > CLEAR_RATIO (optimize_function_for_speed_p (cfun))
		 && num_nonzero_elements < num_ctor_elements / 4)
	  /* If there are "lots" of zeros, it's more efficient to clear
	     the memory and then set the nonzero elements.  */
	  cleared = true;
	else
	  cleared = false;
	/* If there are "lots" of initialized elements, and all of them
	   are valid address constants, then the entire initializer can
	   be dropped to memory, and then memcpy'd out.  Don't do this
	   for sparse arrays, though, as it's more efficient to follow
	   the standard CONSTRUCTOR behavior of memset followed by
	   individual element initialization.  Also don't do this for small
	   all-zero initializers (which aren't big enough to merit
	   clearing), and don't try to make bitwise copies of
	   TREE_ADDRESSABLE types.  */
	if (valid_const_initializer
	    && !(cleared || num_nonzero_elements == 0)
	    && !TREE_ADDRESSABLE (type))
	  {
	    HOST_WIDE_INT size = int_size_in_bytes (type);
	    unsigned int align;
	    /* ??? We can still get unbounded array types, at least
	       from the C++ front end.  This seems wrong, but attempt
	       to work around it for now.  */
	    if (size < 0)
	      {
		size = int_size_in_bytes (TREE_TYPE (object));
		if (size >= 0)
		  TREE_TYPE (ctor) = type = TREE_TYPE (object);
	      }
	    /* Find the maximum alignment we can assume for the object.  */
	    /* ??? Make use of DECL_OFFSET_ALIGN.  */
	    if (DECL_P (object))
	      align = DECL_ALIGN (object);
	    else
	      align = TYPE_ALIGN (type);
	    /* Do a block move either if the size is so small as to make
	       each individual move a sub-unit move on average, or if it
	       is so large as to make individual moves inefficient.  */
	    if (size > 0
		&& num_nonzero_elements > 1
		/* For ctors that have many repeated nonzero elements
		   represented through RANGE_EXPRs, prefer initializing
		   those through runtime loops over copies of large amounts
		   of data from readonly data section.  */
		&& (num_unique_nonzero_elements
		    > num_nonzero_elements / unique_nonzero_ratio
		    || size <= min_unique_size)
		&& (size < num_nonzero_elements
		    || !can_move_by_pieces (size, align)))
	      {
		if (notify_temp_creation)
		  return GS_ERROR;
		walk_tree (&ctor, force_labels_r, NULL, NULL);
		ctor = tree_output_constant_def (ctor);
		if (!useless_type_conversion_p (type, TREE_TYPE (ctor)))
		  ctor = build1 (VIEW_CONVERT_EXPR, type, ctor);
		TREE_OPERAND (*expr_p, 1) = ctor;
		/* This is no longer an assignment of a CONSTRUCTOR, but
		   we still may have processing to do on the LHS.  So
		   pretend we didn't do anything here to let that happen.  */
		return GS_UNHANDLED;
	      }
	  }
	/* If the target is volatile, we have non-zero elements and more than
	   one field to assign, initialize the target from a temporary.  */
	if (TREE_THIS_VOLATILE (object)
	    && !TREE_ADDRESSABLE (type)
	    && (num_nonzero_elements > 0 || !cleared)
	    && vec_safe_length (elts) > 1)
	  {
	    tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type));
	    TREE_OPERAND (*expr_p, 0) = temp;
	    *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
			      *expr_p,
			      build2 (MODIFY_EXPR, void_type_node,
				      object, temp));
	    return GS_OK;
	  }
	if (notify_temp_creation)
	  return GS_OK;
	/* If there are nonzero elements and if needed, pre-evaluate to capture
	   elements overlapping with the lhs into temporaries.  We must do this
	   before clearing to fetch the values before they are zeroed-out.  */
	if (num_nonzero_elements > 0 && TREE_CODE (*expr_p) != INIT_EXPR)
	  {
	    preeval_data.lhs_base_decl = get_base_address (object);
	    if (!DECL_P (preeval_data.lhs_base_decl))
	      preeval_data.lhs_base_decl = NULL;
	    preeval_data.lhs_alias_set = get_alias_set (object);
	    gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
					pre_p, post_p, &preeval_data);
	  }
	bool ctor_has_side_effects_p
	  = TREE_SIDE_EFFECTS (TREE_OPERAND (*expr_p, 1));
	if (cleared)
	  {
	    /* Zap the CONSTRUCTOR element list, which simplifies this case.
	       Note that we still have to gimplify, in order to handle the
	       case of variable sized types.  Avoid shared tree structures.  */
	    CONSTRUCTOR_ELTS (ctor) = NULL;
	    TREE_SIDE_EFFECTS (ctor) = 0;
	    object = unshare_expr (object);
	    gimplify_stmt (expr_p, pre_p);
	  }
	/* If we have not block cleared the object, or if there are nonzero
	   elements in the constructor, or if the constructor has side effects,
	   add assignments to the individual scalar fields of the object.  */
	if (!cleared
	    || num_nonzero_elements > 0
	    || ctor_has_side_effects_p)
	  gimplify_init_ctor_eval (object, elts, pre_p, cleared);
	*expr_p = NULL_TREE;
      }
      break;
    case COMPLEX_TYPE:
      {
	tree r, i;
	if (notify_temp_creation)
	  return GS_OK;
	/* Extract the real and imaginary parts out of the ctor.  */
	gcc_assert (elts->length () == 2);
	r = (*elts)[0].value;
	i = (*elts)[1].value;
	if (r == NULL || i == NULL)
	  {
	    tree zero = build_zero_cst (TREE_TYPE (type));
	    if (r == NULL)
	      r = zero;
	    if (i == NULL)
	      i = zero;
	  }
	/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
	   represent creation of a complex value.  */
	if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
	  {
	    ctor = build_complex (type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	  }
	else
	  {
	    ctor = build2 (COMPLEX_EXPR, type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	    ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
				 pre_p,
				 post_p,
				 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
				 fb_rvalue);
	  }
      }
      break;
    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT ix;
	constructor_elt *ce;
	if (notify_temp_creation)
	  return GS_OK;
	/* Go ahead and simplify constant constructors to VECTOR_CST.  */
	if (TREE_CONSTANT (ctor))
	  {
	    bool constant_p = true;
	    tree value;
	    /* Even when ctor is constant, it might contain non-*_CST
	       elements, such as addresses or trapping values like
	       1.0/0.0 - 1.0/0.0.  Such expressions don't belong
	       in VECTOR_CST nodes.  */
	    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
	      if (!CONSTANT_CLASS_P (value))
		{
		  constant_p = false;
		  break;
		}
	    if (constant_p)
	      {
		TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts);
		break;
	      }
	    TREE_CONSTANT (ctor) = 0;
	  }
	/* Vector types use CONSTRUCTOR all the way through gimple
	   compilation as a general initializer.  */
	FOR_EACH_VEC_SAFE_ELT (elts, ix, ce)
	  {
	    enum gimplify_status tret;
	    tret = gimplify_expr (&ce->value, pre_p, post_p, is_gimple_val,
				  fb_rvalue);
	    if (tret == GS_ERROR)
	      ret = GS_ERROR;
	    else if (TREE_STATIC (ctor)
		     && !initializer_constant_valid_p (ce->value,
						       TREE_TYPE (ce->value)))
	      TREE_STATIC (ctor) = 0;
	  }
	if (!is_gimple_reg (TREE_OPERAND (*expr_p, 0)))
	  TREE_OPERAND (*expr_p, 1) = get_formal_tmp_var (ctor, pre_p);
      }
      break;
    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?  */
      gcc_unreachable ();
    }
  if (ret == GS_ERROR)
    return GS_ERROR;
  /* If we have gimplified both sides of the initializer but have
     not emitted an assignment, do so now.  */
  if (*expr_p)
    {
      tree lhs = TREE_OPERAND (*expr_p, 0);
      tree rhs = TREE_OPERAND (*expr_p, 1);
      if (want_value && object == lhs)
	lhs = unshare_expr (lhs);
      gassign *init = gimple_build_assign (lhs, rhs);
      gimplify_seq_add_stmt (pre_p, init);
    }
  if (want_value)
    {
      *expr_p = object;
      return GS_OK;
    }
  else
    {
      *expr_p = NULL;
      return GS_ALL_DONE;
    }
}
/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  This may only be applied to a rhs of an expression.
   Note that the resulting type may be different from the type pointed
   to in the sense that it is still compatible from the langhooks
   point of view.  */
static tree
gimple_fold_indirect_ref_rhs (tree t)
{
  /* Delegate entirely to the generic folder.  */
  tree folded = gimple_fold_indirect_ref (t);
  return folded;
}
/* Subroutine of gimplify_modify_expr to do simplifications of
   MODIFY_EXPRs based on the code of the RHS.  We loop for as long as
   something changes.

   EXPR_P is the whole assignment; FROM_P/TO_P point at its RHS and LHS
   operands.  WANT_VALUE says whether the caller needs the assignment's
   value.  Returns GS_UNHANDLED when no simplification applied.  */
static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
			  gimple_seq *pre_p, gimple_seq *post_p,
			  bool want_value)
{
  enum gimplify_status ret = GS_UNHANDLED;
  bool changed;
  do
    {
      changed = false;
      switch (TREE_CODE (*from_p))
	{
	case VAR_DECL:
	  /* If we're assigning from a read-only variable initialized with
	     a constructor, do the direct assignment from the constructor,
	     but only if neither source nor target are volatile since this
	     latter assignment might end up being done on a per-field basis.  */
	  if (DECL_INITIAL (*from_p)
	      && TREE_READONLY (*from_p)
	      && !TREE_THIS_VOLATILE (*from_p)
	      && !TREE_THIS_VOLATILE (*to_p)
	      && TREE_CODE (DECL_INITIAL (*from_p)) == CONSTRUCTOR)
	    {
	      tree old_from = *from_p;
	      enum gimplify_status subret;
	      /* Move the constructor into the RHS.  */
	      *from_p = unshare_expr (DECL_INITIAL (*from_p));
	      /* Let's see if gimplify_init_constructor will need to put
		 it in memory.  */
	      subret = gimplify_init_constructor (expr_p, NULL, NULL,
						  false, true);
	      if (subret == GS_ERROR)
		{
		  /* If so, revert the change.  */
		  *from_p = old_from;
		}
	      else
		{
		  ret = GS_OK;
		  changed = true;
		}
	    }
	  break;
	case INDIRECT_REF:
	  {
	    /* If we have code like

	     *(const A*)(A*)&x

	     where the type of "x" is a (possibly cv-qualified variant
	     of "A"), treat the entire expression as identical to "x".
	     This kind of code arises in C++ when an object is bound
	     to a const reference, and if "x" is a TARGET_EXPR we want
	     to take advantage of the optimization below.  */
	    bool volatile_p = TREE_THIS_VOLATILE (*from_p);
	    tree t = gimple_fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
	    if (t)
	      {
		if (TREE_THIS_VOLATILE (t) != volatile_p)
		  {
		    if (DECL_P (t))
		      t = build_simple_mem_ref_loc (EXPR_LOCATION (*from_p),
						    build_fold_addr_expr (t));
		    if (REFERENCE_CLASS_P (t))
		      TREE_THIS_VOLATILE (t) = volatile_p;
		  }
		*from_p = t;
		ret = GS_OK;
		changed = true;
	      }
	    break;
	  }
	case TARGET_EXPR:
	  {
	    /* If we are initializing something from a TARGET_EXPR, strip the
	       TARGET_EXPR and initialize it directly, if possible.  This can't
	       be done if the initializer is void, since that implies that the
	       temporary is set in some non-trivial way.

	       ??? What about code that pulls out the temp and uses it
	       elsewhere?  I think that such code never uses the TARGET_EXPR as
	       an initializer.  If I'm wrong, we'll die because the temp won't
	       have any RTL.  In that case, I guess we'll need to replace
	       references somehow.  */
	    tree init = TARGET_EXPR_INITIAL (*from_p);
	    if (init
		&& (TREE_CODE (*expr_p) != MODIFY_EXPR
		    || !TARGET_EXPR_NO_ELIDE (*from_p))
		&& !VOID_TYPE_P (TREE_TYPE (init)))
	      {
		*from_p = init;
		ret = GS_OK;
		changed = true;
	      }
	  }
	  break;
	case COMPOUND_EXPR:
	  /* Remove any COMPOUND_EXPR in the RHS so the following cases will be
	     caught.  */
	  gimplify_compound_expr (from_p, pre_p, true);
	  ret = GS_OK;
	  changed = true;
	  break;
	case CONSTRUCTOR:
	  /* If we already made some changes, let the front end have a
	     crack at this before we break it down.  */
	  if (ret != GS_UNHANDLED)
	    break;
	  /* If we're initializing from a CONSTRUCTOR, break this into
	     individual MODIFY_EXPRs.  */
	  return gimplify_init_constructor (expr_p, pre_p, post_p, want_value,
					    false);
	case COND_EXPR:
	  /* If we're assigning to a non-register type, push the assignment
	     down into the branches.  This is mandatory for ADDRESSABLE types,
	     since we cannot generate temporaries for such, but it saves a
	     copy in other cases as well.  */
	  if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
	    {
	      /* This code should mirror the code in gimplify_cond_expr.  */
	      enum tree_code code = TREE_CODE (*expr_p);
	      tree cond = *from_p;
	      tree result = *to_p;
	      ret = gimplify_expr (&result, pre_p, post_p,
				   is_gimple_lvalue, fb_lvalue);
	      if (ret != GS_ERROR)
		ret = GS_OK;
	      /* If we are going to write RESULT more than once, clear
		 TREE_READONLY flag, otherwise we might incorrectly promote
		 the variable to static const and initialize it at compile
		 time in one of the branches.  */
	      if (VAR_P (result)
		  && TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node
		  && TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
		TREE_READONLY (result) = 0;
	      if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
		TREE_OPERAND (cond, 1)
		  = build2 (code, void_type_node, result,
			    TREE_OPERAND (cond, 1));
	      if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
		TREE_OPERAND (cond, 2)
		  = build2 (code, void_type_node, unshare_expr (result),
			    TREE_OPERAND (cond, 2));
	      TREE_TYPE (cond) = void_type_node;
	      recalculate_side_effects (cond);
	      if (want_value)
		{
		  gimplify_and_add (cond, pre_p);
		  *expr_p = unshare_expr (result);
		}
	      else
		*expr_p = cond;
	      return ret;
	    }
	  break;
	case CALL_EXPR:
	  /* For calls that return in memory, give *to_p as the CALL_EXPR's
	     return slot so that we don't generate a temporary.  */
	  if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
	      && aggregate_value_p (*from_p, *from_p))
	    {
	      bool use_target;
	      if (!(rhs_predicate_for (*to_p))(*from_p))
		/* If we need a temporary, *to_p isn't accurate.  */
		use_target = false;
	      /* It's OK to use the return slot directly unless it's an NRV. */
	      else if (TREE_CODE (*to_p) == RESULT_DECL
		       && DECL_NAME (*to_p) == NULL_TREE
		       && needs_to_live_in_memory (*to_p))
		use_target = true;
	      else if (is_gimple_reg_type (TREE_TYPE (*to_p))
		       || (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
		/* Don't force regs into memory.  */
		use_target = false;
	      else if (TREE_CODE (*expr_p) == INIT_EXPR)
		/* It's OK to use the target directly if it's being
		   initialized.  */
		use_target = true;
	      else if (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (*to_p)))
		       != INTEGER_CST)
		/* Always use the target and thus RSO for variable-sized types.
		   GIMPLE cannot deal with a variable-sized assignment
		   embedded in a call statement.  */
		use_target = true;
	      else if (TREE_CODE (*to_p) != SSA_NAME
		      && (!is_gimple_variable (*to_p)
			  || needs_to_live_in_memory (*to_p)))
		/* Don't use the original target if it's already addressable;
		   if its address escapes, and the called function uses the
		   NRV optimization, a conforming program could see *to_p
		   change before the called function returns; see c++/19317.
		   When optimizing, the return_slot pass marks more functions
		   as safe after we have escape info.  */
		use_target = false;
	      else
		use_target = true;
	      if (use_target)
		{
		  CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
		  mark_addressable (*to_p);
		}
	    }
	  break;
	case WITH_SIZE_EXPR:
	  /* Likewise for calls that return an aggregate of non-constant size,
	     since we would not be able to generate a temporary at all.  */
	  if (TREE_CODE (TREE_OPERAND (*from_p, 0)) == CALL_EXPR)
	    {
	      *from_p = TREE_OPERAND (*from_p, 0);
	      /* We don't change ret in this case because the
		 WITH_SIZE_EXPR might have been added in
		 gimplify_modify_expr, so returning GS_OK would lead to an
		 infinite loop.  */
	      changed = true;
	    }
	  break;
	  /* If we're initializing from a container, push the initialization
	     inside it.  */
	case CLEANUP_POINT_EXPR:
	case BIND_EXPR:
	case STATEMENT_LIST:
	  {
	    tree wrap = *from_p;
	    tree t;
	    ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval,
				 fb_lvalue);
	    if (ret != GS_ERROR)
	      ret = GS_OK;
	    t = voidify_wrapper_expr (wrap, *expr_p);
	    gcc_assert (t == *expr_p);
	    if (want_value)
	      {
		gimplify_and_add (wrap, pre_p);
		*expr_p = unshare_expr (*to_p);
	      }
	    else
	      *expr_p = wrap;
	    return GS_OK;
	  }
	case COMPOUND_LITERAL_EXPR:
	  {
	    tree complit = TREE_OPERAND (*expr_p, 1);
	    tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (complit);
	    tree decl = DECL_EXPR_DECL (decl_s);
	    tree init = DECL_INITIAL (decl);
	    /* struct T x = (struct T) { 0, 1, 2 } can be optimized
	       into struct T x = { 0, 1, 2 } if the address of the
	       compound literal has never been taken.  */
	    if (!TREE_ADDRESSABLE (complit)
		&& !TREE_ADDRESSABLE (decl)
		&& init)
	      {
		*expr_p = copy_node (*expr_p);
		TREE_OPERAND (*expr_p, 1) = init;
		return GS_OK;
	      }
	  }
	  /* fallthrough */
	default:
	  break;
	}
    }
  while (changed);
  return ret;
}
/* Return true if T looks like a valid GIMPLE statement.

   NB: the exact set of accepted tree codes below is load-bearing;
   adding or removing a case changes what the gimplifier will accept
   as a statement.  */
static bool
is_gimple_stmt (tree t)
{
  const enum tree_code code = TREE_CODE (t);
  switch (code)
    {
    case NOP_EXPR:
      /* The only valid NOP_EXPR is the empty statement.  */
      return IS_EMPTY_STMT (t);
    case BIND_EXPR:
    case COND_EXPR:
      /* These are only valid if they're void.  */
      return TREE_TYPE (t) == NULL || VOID_TYPE_P (TREE_TYPE (t));
    case SWITCH_EXPR:
    case GOTO_EXPR:
    case RETURN_EXPR:
    case LABEL_EXPR:
    case CASE_LABEL_EXPR:
    case TRY_CATCH_EXPR:
    case TRY_FINALLY_EXPR:
    case EH_FILTER_EXPR:
    case CATCH_EXPR:
    case ASM_EXPR:
    case STATEMENT_LIST:
    case OACC_PARALLEL:
    case OACC_KERNELS:
    case OACC_SERIAL:
    case OACC_DATA:
    case OACC_HOST_DATA:
    case OACC_DECLARE:
    case OACC_UPDATE:
    case OACC_ENTER_DATA:
    case OACC_EXIT_DATA:
    case OACC_CACHE:
    case OMP_PARALLEL:
    case OMP_FOR:
    case OMP_SIMD:
    case OMP_DISTRIBUTE:
    case OMP_LOOP:
    case OACC_LOOP:
    case OMP_SCAN:
    case OMP_SECTIONS:
    case OMP_SECTION:
    case OMP_SINGLE:
    case OMP_MASTER:
    case OMP_TASKGROUP:
    case OMP_ORDERED:
    case OMP_CRITICAL:
    case OMP_TASK:
    case OMP_TARGET:
    case OMP_TARGET_DATA:
    case OMP_TARGET_UPDATE:
    case OMP_TARGET_ENTER_DATA:
    case OMP_TARGET_EXIT_DATA:
    case OMP_TASKLOOP:
    case OMP_TEAMS:
      /* These are always void.  */
      return true;
    case CALL_EXPR:
    case MODIFY_EXPR:
    case PREDICT_EXPR:
      /* These are valid regardless of their type.  */
      return true;
    default:
      return false;
    }
}
/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P is
   a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
   DECL_GIMPLE_REG_P set.

   IMPORTANT NOTE: This promotion is performed by introducing a load of the
   other, unmodified part of the complex object just before the total store.
   As a consequence, if the object is still uninitialized, an undefined value
   will be loaded into a register, which may result in a spurious exception
   if the register is floating-point and the value happens to be a signaling
   NaN for example.  Then the fully-fledged complex operations lowering pass
   followed by a DCE pass are necessary in order to fix things up.  */

static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, gimple_seq *pre_p,
				   bool want_value)
{
  tree part_ref = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);
  enum tree_code part_code = TREE_CODE (part_ref);
  tree complex_var = TREE_OPERAND (part_ref, 0);

  /* Build a load of the part of the complex object that is NOT being
     written, and force it into a formal temporary.  */
  enum tree_code other_code
    = (part_code == REALPART_EXPR ? IMAGPART_EXPR : REALPART_EXPR);
  tree other = build1 (other_code, TREE_TYPE (rhs), complex_var);
  TREE_NO_WARNING (other) = 1;
  other = get_formal_tmp_var (other, pre_p);

  /* Pair the stored value with the preserved part, in the right order.  */
  tree realpart, imagpart;
  if (part_code == REALPART_EXPR)
    {
      realpart = rhs;
      imagpart = other;
    }
  else
    {
      realpart = other;
      imagpart = rhs;
    }

  /* Fold to a complex constant when both parts are constant.  */
  tree new_rhs;
  if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
    new_rhs = build_complex (TREE_TYPE (complex_var), realpart, imagpart);
  else
    new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (complex_var),
		      realpart, imagpart);

  /* Emit the total store of the complex variable.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (complex_var, new_rhs));

  *expr_p = want_value ? rhs : NULL_TREE;
  return GS_ALL_DONE;
}
/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.

      modify_expr
	      : varname '=' rhs
	      | '*' ID '=' rhs

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
   *EXPR_P should be stored.

   WANT_VALUE is nonzero iff we want to use the value of this expression
   in another expression.

   Returns GS_ALL_DONE (or GS_OK when WANT_VALUE), or GS_ERROR on
   failure; on success a GIMPLE_ASSIGN or GIMPLE_CALL has been appended
   to PRE_P.  */

static enum gimplify_status
gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
		      bool want_value)
{
  tree *from_p = &TREE_OPERAND (*expr_p, 1);
  tree *to_p = &TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;
  gimple *assign;
  location_t loc = EXPR_LOCATION (*expr_p);
  gimple_stmt_iterator gsi;

  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
	      || TREE_CODE (*expr_p) == INIT_EXPR);

  /* Trying to simplify a clobber using normal logic doesn't work,
     so handle it here.  */
  if (TREE_CLOBBER_P (*from_p))
    {
      ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
      gcc_assert (!want_value);
      /* Clobbers may only target a VAR_DECL or a MEM_REF; wrap anything
	 else in a MEM_REF of its address.  */
      if (!VAR_P (*to_p) && TREE_CODE (*to_p) != MEM_REF)
	{
	  tree addr = get_initialized_tmp_var (build_fold_addr_expr (*to_p),
					       pre_p, post_p);
	  *to_p = build_simple_mem_ref_loc (EXPR_LOCATION (*to_p), addr);
	}
      gimplify_seq_add_stmt (pre_p, gimple_build_assign (*to_p, *from_p));
      *expr_p = NULL;
      return GS_ALL_DONE;
    }

  /* Insert pointer conversions required by the middle-end that are not
     required by the frontend.  This fixes middle-end type checking for
     for example gcc.dg/redecl-6.c.  */
  if (POINTER_TYPE_P (TREE_TYPE (*to_p)))
    {
      STRIP_USELESS_TYPE_CONVERSION (*from_p);
      if (!useless_type_conversion_p (TREE_TYPE (*to_p), TREE_TYPE (*from_p)))
	*from_p = fold_convert_loc (loc, TREE_TYPE (*to_p), *from_p);
    }

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  Do this after
     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
     types properly.  */
  if (zero_sized_type (TREE_TYPE (*from_p))
      && !want_value
      /* Don't do this for calls that return addressable types, expand_call
	 relies on those having a lhs.  */
      && !(TREE_ADDRESSABLE (TREE_TYPE (*from_p))
	   && TREE_CODE (*from_p) == CALL_EXPR))
    {
      gimplify_stmt (from_p, pre_p);
      gimplify_stmt (to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.   Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must do here.  */
  maybe_with_size_expr (from_p);

  /* As a special case, we have to temporarily allow for assignments
     with a CALL_EXPR on the RHS.  Since in GIMPLE a function call is
     a toplevel statement, when gimplifying the GENERIC expression
     MODIFY_EXPR <a, CALL_EXPR <foo>>, we cannot create the tuple
     GIMPLE_ASSIGN <a, GIMPLE_CALL <foo>>.

     Instead, we need to create the tuple GIMPLE_CALL <a, foo>.  To
     prevent gimplify_expr from trying to create a new temporary for
     foo's LHS, we tell it that it should only gimplify until it
     reaches the CALL_EXPR.  On return from gimplify_expr, the newly
     created GIMPLE_CALL <foo> will be the last statement in *PRE_P
     and all we need to do here is set 'a' to be its LHS.  */

  /* Gimplify the RHS first for C++17 and bug 71104.  */
  gimple_predicate initial_pred = initial_rhs_predicate_for (*to_p);
  ret = gimplify_expr (from_p, pre_p, post_p, initial_pred, fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Then gimplify the LHS.  */
  /* If we gimplified the RHS to a CALL_EXPR and that call may return
     twice we have to make sure to gimplify into non-SSA as otherwise
     the abnormal edge added later will make those defs not dominate
     their uses.
     ???  Technically this applies only to the registers used in the
     resulting non-register *TO_P.  */
  bool saved_into_ssa = gimplify_ctxp->into_ssa;
  if (saved_into_ssa
      && TREE_CODE (*from_p) == CALL_EXPR
      && call_expr_flags (*from_p) & ECF_RETURNS_TWICE)
    gimplify_ctxp->into_ssa = false;
  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  gimplify_ctxp->into_ssa = saved_into_ssa;
  if (ret == GS_ERROR)
    return ret;

  /* Now that the LHS is gimplified, re-gimplify the RHS if our initial
     guess for the predicate was wrong.  */
  gimple_predicate final_pred = rhs_predicate_for (*to_p);
  if (final_pred != initial_pred)
    {
      ret = gimplify_expr (from_p, pre_p, post_p, final_pred, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  /* In case of va_arg internal fn wrappped in a WITH_SIZE_EXPR, add the type
     size as argument to the call.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree call = TREE_OPERAND (*from_p, 0);
      tree vlasize = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (call) == CALL_EXPR
	  && CALL_EXPR_IFN (call) == IFN_VA_ARG)
	{
	  /* Rebuild the IFN_VA_ARG call with the VLA size appended as an
	     extra trailing argument.  */
	  int nargs = call_expr_nargs (call);
	  tree type = TREE_TYPE (call);
	  tree ap = CALL_EXPR_ARG (call, 0);
	  tree tag = CALL_EXPR_ARG (call, 1);
	  tree aptag = CALL_EXPR_ARG (call, 2);
	  tree newcall = build_call_expr_internal_loc (EXPR_LOCATION (call),
						       IFN_VA_ARG, type,
						       nargs + 1, ap, tag,
						       aptag, vlasize);
	  TREE_OPERAND (*from_p, 0) = newcall;
	}
    }

  /* Now see if the above changed *from_p to something we handle specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
	return gimplify_modify_expr_to_memset (expr_p, size, want_value, pre_p);
      if (is_gimple_addressable (from))
	{
	  *from_p = from;
	  return gimplify_modify_expr_to_memcpy (expr_p, size, want_value,
						 pre_p);
	}
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.  */
  if ((TREE_CODE (*to_p) == REALPART_EXPR
       || TREE_CODE (*to_p) == IMAGPART_EXPR)
      && is_gimple_reg (TREE_OPERAND (*to_p, 0)))
    return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);

  /* Try to alleviate the effects of the gimplification creating artificial
     temporaries (see for example is_gimple_reg_rhs) on the debug info, but
     make sure not to create DECL_DEBUG_EXPR links across functions.  */
  if (!gimplify_ctxp->into_ssa
      && VAR_P (*from_p)
      && DECL_IGNORED_P (*from_p)
      && DECL_P (*to_p)
      && !DECL_IGNORED_P (*to_p)
      && decl_function_context (*to_p) == current_function_decl
      && decl_function_context (*from_p) == current_function_decl)
    {
      if (!DECL_NAME (*from_p) && DECL_NAME (*to_p))
	DECL_NAME (*from_p)
	  = create_tmp_var_name (IDENTIFIER_POINTER (DECL_NAME (*to_p)));
      DECL_HAS_DEBUG_EXPR_P (*from_p) = 1;
      SET_DECL_DEBUG_EXPR (*from_p, *to_p);
    }

  /* A volatile LHS must not be re-read to produce the expression value;
     copy the RHS into a temporary first.  */
  if (want_value && TREE_THIS_VOLATILE (*to_p))
    *from_p = get_initialized_tmp_var (*from_p, pre_p, post_p);

  if (TREE_CODE (*from_p) == CALL_EXPR)
    {
      /* Since the RHS is a CALL_EXPR, we need to create a GIMPLE_CALL
	 instead of a GIMPLE_ASSIGN.  */
      gcall *call_stmt;
      if (CALL_EXPR_FN (*from_p) == NULL_TREE)
	{
	  /* Gimplify internal functions created in the FEs.  */
	  int nargs = call_expr_nargs (*from_p), i;
	  enum internal_fn ifn = CALL_EXPR_IFN (*from_p);
	  auto_vec<tree> vargs (nargs);

	  for (i = 0; i < nargs; i++)
	    {
	      gimplify_arg (&CALL_EXPR_ARG (*from_p, i), pre_p,
			    EXPR_LOCATION (*from_p));
	      vargs.quick_push (CALL_EXPR_ARG (*from_p, i));
	    }
	  call_stmt = gimple_build_call_internal_vec (ifn, vargs);
	  gimple_call_set_nothrow (call_stmt, TREE_NOTHROW (*from_p));
	  gimple_set_location (call_stmt, EXPR_LOCATION (*expr_p));
	}
      else
	{
	  tree fnptrtype = TREE_TYPE (CALL_EXPR_FN (*from_p));
	  CALL_EXPR_FN (*from_p) = TREE_OPERAND (CALL_EXPR_FN (*from_p), 0);
	  STRIP_USELESS_TYPE_CONVERSION (CALL_EXPR_FN (*from_p));
	  tree fndecl = get_callee_fndecl (*from_p);
	  /* Lower __builtin_expect calls directly to the internal
	     function form.  */
	  if (fndecl
	      && fndecl_built_in_p (fndecl, BUILT_IN_EXPECT)
	      && call_expr_nargs (*from_p) == 3)
	    call_stmt = gimple_build_call_internal (IFN_BUILTIN_EXPECT, 3,
						    CALL_EXPR_ARG (*from_p, 0),
						    CALL_EXPR_ARG (*from_p, 1),
						    CALL_EXPR_ARG (*from_p, 2));
	  else
	    {
	      call_stmt = gimple_build_call_from_tree (*from_p, fnptrtype);
	    }
	}
      notice_special_calls (call_stmt);
      if (!gimple_call_noreturn_p (call_stmt) || !should_remove_lhs_p (*to_p))
	gimple_call_set_lhs (call_stmt, *to_p);
      else if (TREE_CODE (*to_p) == SSA_NAME)
	/* The above is somewhat premature, avoid ICEing later for a
	   SSA name w/o a definition.  We may have uses in the GIMPLE IL.
	   ???  This doesn't make it a default-def.  */
	SSA_NAME_DEF_STMT (*to_p) = gimple_build_nop ();
      assign = call_stmt;
    }
  else
    {
      assign = gimple_build_assign (*to_p, *from_p);
      gimple_set_location (assign, EXPR_LOCATION (*expr_p));
      if (COMPARISON_CLASS_P (*from_p))
	gimple_set_no_warning (assign, TREE_NO_WARNING (*from_p));
    }

  if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
    {
      /* We should have got an SSA name from the start.  */
      gcc_assert (TREE_CODE (*to_p) == SSA_NAME
		  || ! gimple_in_ssa_p (cfun));
    }

  gimplify_seq_add_stmt (pre_p, assign);
  gsi = gsi_last (*pre_p);
  maybe_fold_stmt (&gsi);

  if (want_value)
    {
      /* Re-read the (non-volatile) LHS as the expression value; for a
	 volatile LHS the copied RHS temporary is used instead.  */
      *expr_p = TREE_THIS_VOLATILE (*to_p) ? *from_p : unshare_expr (*to_p);
      return GS_OK;
    }
  else
    *expr_p = NULL;
  return GS_ALL_DONE;
}
/* Gimplify a comparison between two variable-sized objects.  Do this
   with a call to BUILT_IN_MEMCMP.  */

static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree op0 = TREE_OPERAND (*expr_p, 0);
  tree op1 = TREE_OPERAND (*expr_p, 1);

  /* Number of bytes to compare; resolve any PLACEHOLDER_EXPRs in the
     size against the first operand.  */
  tree nbytes = unshare_expr (TYPE_SIZE_UNIT (TREE_TYPE (op0)));
  nbytes = SUBSTITUTE_PLACEHOLDER_IN_EXPR (nbytes, op0);

  /* memcmp (&op0, &op1, nbytes) ...  */
  tree dest = build_fold_addr_expr_loc (loc, op0);
  tree src = build_fold_addr_expr_loc (loc, op1);
  tree memcmp_fn = builtin_decl_implicit (BUILT_IN_MEMCMP);
  tree call = build_call_expr_loc (loc, memcmp_fn, 3, dest, src, nbytes);

  /* ... compared against zero with the original comparison code.  */
  tree cmp = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p),
		     call, integer_zero_node);
  SET_EXPR_LOCATION (cmp, loc);
  *expr_p = cmp;
  return GS_OK;
}
/* Gimplify a comparison between two aggregate objects of integral scalar
   mode as a comparison between the bitwise equivalent scalar values.  */

static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree lhs = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);

  /* An unsigned integral type with the same mode as the aggregates.  */
  tree scalar_type
    = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (lhs)), 1);

  /* Reinterpret both operands as that scalar type and rebuild the
     comparison on the converted values.  */
  lhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, lhs);
  rhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, rhs);
  *expr_p = fold_build2_loc (loc, TREE_CODE (*expr_p), TREE_TYPE (*expr_p),
			     lhs, rhs);
  return GS_OK;
}
/* Gimplify an expression sequence.  This function gimplifies each
   expression and rewrites the original expression with the last
   expression of the sequence in GIMPLE form.

   PRE_P points to the list where the side effects for all the
   expressions in the sequence will be emitted.

   WANT_VALUE is true when the result of the last COMPOUND_EXPR is used.  */

static enum gimplify_status
gimplify_compound_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree cur = *expr_p;

  /* Peel COMPOUND_EXPRs off from the left, emitting each left-hand
     operand as a statement, until only the trailing expression is left.  */
  do
    {
      tree *lhs_p = &TREE_OPERAND (cur, 0);

      if (TREE_CODE (*lhs_p) == COMPOUND_EXPR)
	gimplify_compound_expr (lhs_p, pre_p, false);
      else
	gimplify_stmt (lhs_p, pre_p);

      cur = TREE_OPERAND (cur, 1);
    }
  while (TREE_CODE (cur) == COMPOUND_EXPR);

  *expr_p = cur;
  if (!want_value)
    {
      /* The value is unused, so gimplify the trailing expression as a
	 statement too.  */
      gimplify_stmt (expr_p, pre_p);
      return GS_ALL_DONE;
    }
  return GS_OK;
}
/* Gimplify a SAVE_EXPR node.  EXPR_P points to the expression to
   gimplify.  After gimplification, EXPR_P will point to a new temporary
   that holds the original value of the SAVE_EXPR node.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_save_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);

  enum gimplify_status ret = GS_ALL_DONE;
  tree val = TREE_OPERAND (*expr_p, 0);

  /* Evaluate the operand exactly once, the first time this SAVE_EXPR is
     gimplified; subsequent occurrences reuse the stored result.  */
  if (!SAVE_EXPR_RESOLVED_P (*expr_p))
    {
      if (TREE_TYPE (val) == void_type_node)
	{
	  /* The operand may be a void-valued expression.  It is
	     being executed only for its side-effects.  */
	  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			       is_gimple_stmt, fb_none);
	  val = NULL;
	}
      else
	/* The temporary may not be an SSA name as later abnormal and EH
	   control flow may invalidate use/def domination.  When in SSA
	   form then assume there are no such issues and SAVE_EXPRs only
	   appear via GENERIC foldings.  */
	val = get_initialized_tmp_var (val, pre_p, post_p,
				       gimple_in_ssa_p (cfun));

      TREE_OPERAND (*expr_p, 0) = val;
      SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
    }

  *expr_p = val;
  return ret;
}
/* Rewrite the ADDR_EXPR node pointed to by EXPR_P

      unary_expr
	      : ...
	      | '&' varname
	      ...

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
   *EXPR_P should be stored.

   Dispatches on the code of the ADDR_EXPR operand; note the cross-case
   'goto do_indirect_ref' used for both INDIRECT_REF and zero-offset
   MEM_REF operands.  */

static enum gimplify_status
gimplify_addr_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree expr = *expr_p;
  tree op0 = TREE_OPERAND (expr, 0);
  enum gimplify_status ret;
  location_t loc = EXPR_LOCATION (*expr_p);

  switch (TREE_CODE (op0))
    {
    case INDIRECT_REF:
    do_indirect_ref:
      /* Check if we are dealing with an expression of the form '&*ptr'.
	 While the front end folds away '&*ptr' into 'ptr', these
	 expressions may be generated internally by the compiler (e.g.,
	 builtins like __builtin_va_end).  */
      /* Caution: the silent array decomposition semantics we allow for
	 ADDR_EXPR means we can't always discard the pair.  */
      /* Gimplification of the ADDR_EXPR operand may drop
	 cv-qualification conversions, so make sure we add them if
	 needed.  */
      {
	tree op00 = TREE_OPERAND (op0, 0);
	tree t_expr = TREE_TYPE (expr);
	tree t_op00 = TREE_TYPE (op00);

	if (!useless_type_conversion_p (t_expr, t_op00))
	  op00 = fold_convert_loc (loc, TREE_TYPE (expr), op00);
	*expr_p = op00;
	ret = GS_OK;
      }
      break;

    case VIEW_CONVERT_EXPR:
      /* Take the address of our operand and then convert it to the type of
	 this ADDR_EXPR.

	 ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
	 all clear.  The impact of this transformation is even less clear.  */

      /* If the operand is a useless conversion, look through it.  Doing so
	 guarantees that the ADDR_EXPR and its operand will remain of the
	 same type.  */
      if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
	op0 = TREE_OPERAND (op0, 0);

      *expr_p = fold_convert_loc (loc, TREE_TYPE (expr),
				  build_fold_addr_expr_loc (loc,
							TREE_OPERAND (op0, 0)));
      ret = GS_OK;
      break;

    case MEM_REF:
      /* &MEM[p, 0] is equivalent to &*p; reuse the INDIRECT_REF path.  */
      if (integer_zerop (TREE_OPERAND (op0, 1)))
	goto do_indirect_ref;

      /* fall through */

    default:
      /* If we see a call to a declared builtin or see its address
	 being taken (we can unify those cases here) then we can mark
	 the builtin for implicit generation by GCC.  */
      if (TREE_CODE (op0) == FUNCTION_DECL
	  && fndecl_built_in_p (op0, BUILT_IN_NORMAL)
	  && builtin_decl_declared_p (DECL_FUNCTION_CODE (op0)))
	set_builtin_decl_implicit_p (DECL_FUNCTION_CODE (op0), true);

      /* We use fb_either here because the C frontend sometimes takes
	 the address of a call that returns a struct; see
	 gcc.dg/c99-array-lval-1.c.  The gimplifier will correctly make
	 the implied temporary explicit.  */

      /* Make the operand addressable.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
			   is_gimple_addressable, fb_either);
      if (ret == GS_ERROR)
	break;

      /* Then mark it.  Beware that it may not be possible to do so directly
	 if a temporary has been created by the gimplification.  */
      prepare_gimple_addressable (&TREE_OPERAND (expr, 0), pre_p);

      op0 = TREE_OPERAND (expr, 0);

      /* For various reasons, the gimplification of the expression
	 may have made a new INDIRECT_REF.  */
      if (TREE_CODE (op0) == INDIRECT_REF
	  || (TREE_CODE (op0) == MEM_REF
	      && integer_zerop (TREE_OPERAND (op0, 1))))
	goto do_indirect_ref;

      mark_addressable (TREE_OPERAND (expr, 0));

      /* The FEs may end up building ADDR_EXPRs early on a decl with
	 an incomplete type.  Re-build ADDR_EXPRs in canonical form
	 here.  */
      if (!types_compatible_p (TREE_TYPE (op0), TREE_TYPE (TREE_TYPE (expr))))
	*expr_p = build_fold_addr_expr (op0);

      /* Make sure TREE_CONSTANT and TREE_SIDE_EFFECTS are set properly.  */
      recompute_tree_invariant_for_addr_expr (*expr_p);

      /* If we re-built the ADDR_EXPR add a conversion to the original type
	 if required.  */
      if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
	*expr_p = fold_convert (TREE_TYPE (expr), *expr_p);

      break;
    }

  return ret;
}
/* Gimplify the operands of an ASM_EXPR.  Input operands should be a gimple
   value; output operands should be a gimple lvalue.

   Each in/out ("+") operand is split into a separate output operand and a
   matching input operand so the optimizers have more freedom.  On success
   a GIMPLE_ASM is appended to PRE_P; ASMs with operand errors are not
   added to the IL and GS_ERROR is returned instead.  */

static enum gimplify_status
gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree expr;
  int noutputs;
  const char **oconstraints;
  int i;
  tree link;
  const char *constraint;
  bool allows_mem, allows_reg, is_inout;
  enum gimplify_status ret, tret;
  gasm *stmt;
  vec<tree, va_gc> *inputs;
  vec<tree, va_gc> *outputs;
  vec<tree, va_gc> *clobbers;
  vec<tree, va_gc> *labels;
  tree link_next;

  expr = *expr_p;
  noutputs = list_length (ASM_OUTPUTS (expr));
  oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *));

  inputs = NULL;
  outputs = NULL;
  clobbers = NULL;
  labels = NULL;

  ret = GS_ALL_DONE;
  link_next = NULL_TREE;
  /* First pass: the output operands.  Note that I keeps counting across
     the later input/clobber/label loops, matching operand numbering in
     diagnostics.  */
  for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = link_next)
    {
      bool ok;
      size_t constraint_len;

      link_next = TREE_CHAIN (link);

      oconstraints[i]
	= constraint
	= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      constraint_len = strlen (constraint);
      if (constraint_len == 0)
	continue;

      ok = parse_output_constraint (&constraint, i, 0, 0,
				    &allows_mem, &allows_reg, &is_inout);
      if (!ok)
	{
	  ret = GS_ERROR;
	  is_inout = false;
	}

      /* If we can't make copies, we can only accept memory.
	 Similarly for VLAs.  */
      tree outtype = TREE_TYPE (TREE_VALUE (link));
      if (outtype != error_mark_node
	  && (TREE_ADDRESSABLE (outtype)
	      || !COMPLETE_TYPE_P (outtype)
	      || !tree_fits_poly_uint64_p (TYPE_SIZE_UNIT (outtype))))
	{
	  if (allows_mem)
	    allows_reg = 0;
	  else
	    {
	      error ("impossible constraint in %<asm%>");
	      error ("non-memory output %d must stay in memory", i);
	      return GS_ERROR;
	    }
	}

      if (!allows_reg && allows_mem)
	mark_addressable (TREE_VALUE (link));

      tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
			    is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			    fb_lvalue | fb_mayfail);
      if (tret == GS_ERROR)
	{
	  error ("invalid lvalue in %<asm%> output %d", i);
	  ret = tret;
	}

      /* If the constraint does not allow memory make sure we gimplify
         it to a register if it is not already but its base is.  This
	 happens for complex and vector components.  */
      if (!allows_mem)
	{
	  tree op = TREE_VALUE (link);
	  if (! is_gimple_val (op)
	      && is_gimple_reg_type (TREE_TYPE (op))
	      && is_gimple_reg (get_base_address (op)))
	    {
	      tree tem = create_tmp_reg (TREE_TYPE (op));
	      tree ass;
	      if (is_inout)
		{
		  /* For "+" operands, first copy the current value into
		     the temporary before the asm ...  */
		  ass = build2 (MODIFY_EXPR, TREE_TYPE (tem),
				tem, unshare_expr (op));
		  gimplify_and_add (ass, pre_p);
		}
	      /* ... and copy the temporary back out afterwards.  */
	      ass = build2 (MODIFY_EXPR, TREE_TYPE (tem), op, tem);
	      gimplify_and_add (ass, post_p);

	      TREE_VALUE (link) = tem;
	      tret = GS_OK;
	    }
	}

      vec_safe_push (outputs, link);
      TREE_CHAIN (link) = NULL_TREE;

      if (is_inout)
	{
	  /* An input/output operand.  To give the optimizers more
	     flexibility, split it into separate input and output
 	     operands.  */
	  tree input;
	  /* Buffer big enough to format a 32-bit UINT_MAX into.  */
	  char buf[11];

	  /* Turn the in/out constraint into an output constraint.  */
	  char *p = xstrdup (constraint);
	  p[0] = '=';
	  TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);

	  /* And add a matching input constraint.  */
	  if (allows_reg)
	    {
	      sprintf (buf, "%u", i);

	      /* If there are multiple alternatives in the constraint,
		 handle each of them individually.  Those that allow register
		 will be replaced with operand number, the others will stay
		 unchanged.  */
	      if (strchr (p, ',') != NULL)
		{
		  size_t len = 0, buflen = strlen (buf);
		  char *beg, *end, *str, *dst;

		  /* First measure the length of the rewritten constraint
		     string ...  */
		  for (beg = p + 1;;)
		    {
		      end = strchr (beg, ',');
		      if (end == NULL)
			end = strchr (beg, '\0');
		      if ((size_t) (end - beg) < buflen)
			len += buflen + 1;
		      else
			len += end - beg + 1;
		      if (*end)
			beg = end + 1;
		      else
			break;
		    }

		  str = (char *) alloca (len);
		  /* ... then build it alternative by alternative.  */
		  for (beg = p + 1, dst = str;;)
		    {
		      const char *tem;
		      bool mem_p, reg_p, inout_p;

		      end = strchr (beg, ',');
		      if (end)
			*end = '\0';
		      beg[-1] = '=';
		      tem = beg - 1;
		      parse_output_constraint (&tem, i, 0, 0,
					       &mem_p, &reg_p, &inout_p);
		      if (dst != str)
			*dst++ = ',';
		      if (reg_p)
			{
			  memcpy (dst, buf, buflen);
			  dst += buflen;
			}
		      else
			{
			  if (end)
			    len = end - beg;
			  else
			    len = strlen (beg);
			  memcpy (dst, beg, len);
			  dst += len;
			}
		      if (end)
			beg = end + 1;
		      else
			break;
		    }
		  *dst = '\0';
		  input = build_string (dst - str, str);
		}
	      else
		input = build_string (strlen (buf), buf);
	    }
	  else
	    input = build_string (constraint_len - 1, constraint + 1);

	  free (p);

	  input = build_tree_list (build_tree_list (NULL_TREE, input),
				   unshare_expr (TREE_VALUE (link)));
	  ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
	}
    }

  link_next = NULL_TREE;
  /* Second pass: the input operands.  */
  for (link = ASM_INPUTS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      parse_input_constraint (&constraint, 0, 0, noutputs, 0,
			      oconstraints, &allows_mem, &allows_reg);

      /* If we can't make copies, we can only accept memory.  */
      tree intype = TREE_TYPE (TREE_VALUE (link));
      if (intype != error_mark_node
	  && (TREE_ADDRESSABLE (intype)
	      || !COMPLETE_TYPE_P (intype)
	      || !tree_fits_poly_uint64_p (TYPE_SIZE_UNIT (intype))))
	{
	  if (allows_mem)
	    allows_reg = 0;
	  else
	    {
	      error ("impossible constraint in %<asm%>");
	      error ("non-memory input %d must stay in memory", i);
	      return GS_ERROR;
	    }
	}

      /* If the operand is a memory input, it should be an lvalue.  */
      if (!allows_reg && allows_mem)
	{
	  tree inputv = TREE_VALUE (link);
	  STRIP_NOPS (inputv);
	  if (TREE_CODE (inputv) == PREDECREMENT_EXPR
	      || TREE_CODE (inputv) == PREINCREMENT_EXPR
	      || TREE_CODE (inputv) == POSTDECREMENT_EXPR
	      || TREE_CODE (inputv) == POSTINCREMENT_EXPR
	      || TREE_CODE (inputv) == MODIFY_EXPR)
	    TREE_VALUE (link) = error_mark_node;
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_lvalue, fb_lvalue | fb_mayfail);
	  if (tret != GS_ERROR)
	    {
	      /* Unlike output operands, memory inputs are not guaranteed
		 to be lvalues by the FE, and while the expressions are
		 marked addressable there, if it is e.g. a statement
		 expression, temporaries in it might not end up being
		 addressable.  They might be already used in the IL and thus
		 it is too late to make them addressable now though.  */
	      tree x = TREE_VALUE (link);
	      while (handled_component_p (x))
		x = TREE_OPERAND (x, 0);
	      if (TREE_CODE (x) == MEM_REF
		  && TREE_CODE (TREE_OPERAND (x, 0)) == ADDR_EXPR)
		x = TREE_OPERAND (TREE_OPERAND (x, 0), 0);
	      if ((VAR_P (x)
		   || TREE_CODE (x) == PARM_DECL
		   || TREE_CODE (x) == RESULT_DECL)
		  && !TREE_ADDRESSABLE (x)
		  && is_gimple_reg (x))
		{
		  warning_at (EXPR_LOC_OR_LOC (TREE_VALUE (link),
					       input_location), 0,
			      "memory input %d is not directly addressable",
			      i);
		  prepare_gimple_addressable (&TREE_VALUE (link), pre_p);
		}
	    }
	  mark_addressable (TREE_VALUE (link));
	  if (tret == GS_ERROR)
	    {
	      error_at (EXPR_LOC_OR_LOC (TREE_VALUE (link), input_location),
			"memory input %d is not directly addressable", i);
	      ret = tret;
	    }
	}
      else
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_asm_val, fb_rvalue);
	  if (tret == GS_ERROR)
	    ret = tret;
	}

      TREE_CHAIN (link) = NULL_TREE;
      vec_safe_push (inputs, link);
    }

  link_next = NULL_TREE;
  /* Clobbers and labels need no gimplification, only collection.  */
  for (link = ASM_CLOBBERS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      TREE_CHAIN (link) = NULL_TREE;
      vec_safe_push (clobbers, link);
    }

  link_next = NULL_TREE;
  for (link = ASM_LABELS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      TREE_CHAIN (link) = NULL_TREE;
      vec_safe_push (labels, link);
    }

  /* Do not add ASMs with errors to the gimple IL stream.  */
  if (ret != GS_ERROR)
    {
      stmt = gimple_build_asm_vec (TREE_STRING_POINTER (ASM_STRING (expr)),
				   inputs, outputs, clobbers, labels);

      /* An asm without outputs is always treated as volatile.  */
      gimple_asm_set_volatile (stmt, ASM_VOLATILE_P (expr) || noutputs == 0);
      gimple_asm_set_input (stmt, ASM_INPUT_P (expr));
      gimple_asm_set_inline (stmt, ASM_INLINE_P (expr));

      gimplify_seq_add_stmt (pre_p, stmt);
    }

  return ret;
}
/* Gimplify a CLEANUP_POINT_EXPR.  Currently this works by adding
   GIMPLE_WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
   gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
   return to this function.

   FIXME should we complexify the prequeue handling instead?  Or use flags
   for all the cleanups and let the optimizer tighten them up?  The current
   code seems pretty fragile; it will break on a cleanup within any
   non-conditional nesting.  But any such nesting would be broken, anyway;
   we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
   and continues out of it.  We can do that at the RTL level, though, so
   having an optimizer to tighten up try/finally regions would be a Good
   Thing.  */

static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
{
  gimple_stmt_iterator iter;
  gimple_seq body_sequence = NULL;

  /* If the wrapped expression produces a value, TEMP is the temporary
     voidify_wrapper_expr introduced to carry it.  */
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* We only care about the number of conditions between the innermost
     CLEANUP_POINT_EXPR and the cleanup.  So save and reset the count and
     any cleanups collected outside the CLEANUP_POINT_EXPR.  */
  int old_conds = gimplify_ctxp->conditions;
  gimple_seq old_cleanups = gimplify_ctxp->conditional_cleanups;
  bool old_in_cleanup_point_expr = gimplify_ctxp->in_cleanup_point_expr;
  gimplify_ctxp->conditions = 0;
  gimplify_ctxp->conditional_cleanups = NULL;
  gimplify_ctxp->in_cleanup_point_expr = true;

  /* Gimplify the body into its own sequence so the scan below only sees
     statements belonging to this cleanup point.  */
  gimplify_stmt (&TREE_OPERAND (*expr_p, 0), &body_sequence);

  gimplify_ctxp->conditions = old_conds;
  gimplify_ctxp->conditional_cleanups = old_cleanups;
  gimplify_ctxp->in_cleanup_point_expr = old_in_cleanup_point_expr;

  /* Replace each GIMPLE_WITH_CLEANUP_EXPR with a try/finally (or
     try/catch) protecting everything that follows it in the sequence.  */
  for (iter = gsi_start (body_sequence); !gsi_end_p (iter); )
    {
      gimple *wce = gsi_stmt (iter);

      if (gimple_code (wce) == GIMPLE_WITH_CLEANUP_EXPR)
	{
	  if (gsi_one_before_end_p (iter))
	    {
	      /* The WCE is the last statement: nothing needs protecting,
		 so just inline the cleanup (unless it is EH-only) and
		 drop the marker.  */
	      /* Note that gsi_insert_seq_before and gsi_remove do not
		 scan operands, unlike some other sequence mutators.  */
	      if (!gimple_wce_cleanup_eh_only (wce))
		gsi_insert_seq_before_without_update (&iter,
						      gimple_wce_cleanup (wce),
						      GSI_SAME_STMT);
	      gsi_remove (&iter, true);
	      break;
	    }
	  else
	    {
	      gtry *gtry;
	      gimple_seq seq;
	      enum gimple_try_flags kind;

	      if (gimple_wce_cleanup_eh_only (wce))
		kind = GIMPLE_TRY_CATCH;
	      else
		kind = GIMPLE_TRY_FINALLY;
	      /* The statements after the WCE become the protected body.  */
	      seq = gsi_split_seq_after (iter);

	      gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
	      /* Do not use gsi_replace here, as it may scan operands.
		 We want to do a simple structural modification only.  */
	      gsi_set_stmt (&iter, gtry);
	      /* Continue scanning inside the new try body for further
		 WCEs, which nest inside this one.  */
	      iter = gsi_start (gtry->eval);
	    }
	}
      else
	gsi_next (&iter);
    }

  gimplify_seq_add_seq (pre_p, body_sequence);
  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }
  else
    {
      *expr_p = NULL;
      return GS_ALL_DONE;
    }
}
/* Insert a cleanup marker for gimplify_cleanup_point_expr.  CLEANUP
   is the cleanup action required.  EH_ONLY is true if the cleanup should
   only be executed if an exception is thrown, not on normal exit.
   If FORCE_UNCOND is true perform the cleanup unconditionally;  this is
   only valid for clobbers.

   VAR is the value the cleanup protects; it is only used to suppress a
   bogus uninitialized-use warning in the conditional case.  */

static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, gimple_seq *pre_p,
		     bool force_uncond = false)
{
  gimple *wce;
  gimple_seq cleanup_stmts = NULL;

  /* Errors can result in improperly nested cleanups.  Which results in
     confusion when trying to resolve the GIMPLE_WITH_CLEANUP_EXPR.  */
  if (seen_error ())
    return;

  if (gimple_conditional_context ())
    {
      /* If we're in a conditional context, this is more complex.  We only
	 want to run the cleanup if we actually ran the initialization that
	 necessitates it, but we want to run it after the end of the
	 conditional context.  So we wrap the try/finally around the
	 condition and use a flag to determine whether or not to actually
	 run the destructor.  Thus

	   test ? f(A()) : 0

	 becomes (approximately)

	   flag = 0;
	   try {
	     if (test) { A::A(temp); flag = 1; val = f(temp); }
	     else { val = 0; }
	   } finally {
	     if (flag) A::~A(temp);
	   }
	   val
      */
      if (force_uncond)
	{
	  /* Clobbers may run unconditionally: no flag needed.  */
	  gimplify_stmt (&cleanup, &cleanup_stmts);
	  wce = gimple_build_wce (cleanup_stmts);
	  gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce);
	}
      else
	{
	  tree flag = create_tmp_var (boolean_type_node, "cleanup");
	  gassign *ffalse = gimple_build_assign (flag, boolean_false_node);
	  gassign *ftrue = gimple_build_assign (flag, boolean_true_node);

	  /* Guard the cleanup with the flag ...  */
	  cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
	  gimplify_stmt (&cleanup, &cleanup_stmts);
	  wce = gimple_build_wce (cleanup_stmts);

	  /* ... initialize the flag to false before the whole conditional
	     construct, and set it to true at the point the initialization
	     actually runs (PRE_P).  */
	  gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, ffalse);
	  gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce);
	  gimplify_seq_add_stmt (pre_p, ftrue);

	  /* Because of this manipulation, and the EH edges that jump
	     threading cannot redirect, the temporary (VAR) will appear
	     to be used uninitialized.  Don't warn.  */
	  TREE_NO_WARNING (var) = 1;
	}
    }
  else
    {
      /* Unconditional context: emit the cleanup marker directly.  */
      gimplify_stmt (&cleanup, &cleanup_stmts);
      wce = gimple_build_wce (cleanup_stmts);
      gimple_wce_set_cleanup_eh_only (wce, eh_only);
      gimplify_seq_add_stmt (pre_p, wce);
    }
}
/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR.
   *EXPR_P is the TARGET_EXPR; statements generated for initializing its
   temporary slot are appended to *PRE_P (postfix effects to *POST_P).
   On success, *EXPR_P is replaced by the slot decl itself and GS_OK is
   returned; GS_ERROR is returned if gimplifying the initializer failed.  */
static enum gimplify_status
gimplify_target_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree targ = *expr_p;
  tree temp = TARGET_EXPR_SLOT (targ);
  tree init = TARGET_EXPR_INITIAL (targ);
  enum gimplify_status ret;
  bool unpoison_empty_seq = false;
  gimple_stmt_iterator unpoison_it;
  if (init)
    {
      tree cleanup = NULL_TREE;
      /* TARGET_EXPR temps aren't part of the enclosing block, so add it
	 to the temps list.  Handle also variable length TARGET_EXPRs.  */
      if (!poly_int_tree_p (DECL_SIZE (temp)))
	{
	  if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (temp)))
	    gimplify_type_sizes (TREE_TYPE (temp), pre_p);
	  gimplify_vla_decl (temp, pre_p);
	}
      else
	{
	  /* Save location where we need to place unpoisoning.  It's possible
	     that a variable will be converted to needs_to_live_in_memory.  */
	  unpoison_it = gsi_last (*pre_p);
	  unpoison_empty_seq = gsi_end_p (unpoison_it);
	  gimple_add_tmp_var (temp);
	}
      /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
	 expression is supposed to initialize the slot.  */
      if (VOID_TYPE_P (TREE_TYPE (init)))
	ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
      else
	{
	  /* Otherwise wrap the initializer in an INIT_EXPR storing into
	     the slot.  The INIT_EXPR node itself is consumed by
	     gimplification and can be freed immediately afterwards.  */
	  tree init_expr = build2 (INIT_EXPR, void_type_node, temp, init);
	  init = init_expr;
	  ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
	  init = NULL;
	  ggc_free (init_expr);
	}
      if (ret == GS_ERROR)
	{
	  /* PR c++/28266 Make sure this is expanded only once. */
	  TARGET_EXPR_INITIAL (targ) = NULL_TREE;
	  return GS_ERROR;
	}
      if (init)
	gimplify_and_add (init, pre_p);
      /* If needed, push the cleanup for the temp.  */
      if (TARGET_EXPR_CLEANUP (targ))
	{
	  if (CLEANUP_EH_ONLY (targ))
	    gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
				 CLEANUP_EH_ONLY (targ), pre_p);
	  else
	    cleanup = TARGET_EXPR_CLEANUP (targ);
	}
      /* Add a clobber for the temporary going out of scope, like
	 gimplify_bind_expr.  */
      if (gimplify_ctxp->in_cleanup_point_expr
	  && needs_to_live_in_memory (temp))
	{
	  if (flag_stack_reuse == SR_ALL)
	    {
	      tree clobber = build_clobber (TREE_TYPE (temp));
	      clobber = build2 (MODIFY_EXPR, TREE_TYPE (temp), temp, clobber);
	      gimple_push_cleanup (temp, clobber, false, pre_p, true);
	    }
	  if (asan_poisoned_variables
	      && DECL_ALIGN (temp) <= MAX_SUPPORTED_STACK_ALIGNMENT
	      && !TREE_STATIC (temp)
	      && dbg_cnt (asan_use_after_scope)
	      && !gimplify_omp_ctxp)
	    {
	      /* ASan use-after-scope: unpoison the temp at the position
		 saved above (before its first use) and push a cleanup
		 that re-poisons it on scope exit.  */
	      tree asan_cleanup = build_asan_poison_call_expr (temp);
	      if (asan_cleanup)
		{
		  if (unpoison_empty_seq)
		    unpoison_it = gsi_start (*pre_p);
		  asan_poison_variable (temp, false, &unpoison_it,
					unpoison_empty_seq);
		  gimple_push_cleanup (temp, asan_cleanup, false, pre_p);
		}
	    }
	}
      if (cleanup)
	gimple_push_cleanup (temp, cleanup, false, pre_p);
      /* Only expand this once.  */
      TREE_OPERAND (targ, 3) = init;
      TARGET_EXPR_INITIAL (targ) = NULL_TREE;
    }
  else
    /* We should have expanded this before.  */
    gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));
  *expr_p = temp;
  return GS_OK;
}
/* Gimplification of expression trees.  */
/* Gimplify an expression appearing in statement position.  The GIMPLE
   statements produced are appended to *SEQ_P (gimplify_expr allocates a
   new sequence when *SEQ_P is NULL).
   Returns true iff at least one statement was actually appended.  */
bool
gimplify_stmt (tree *stmt_p, gimple_seq *seq_p)
{
  gimple_seq_node tail_before = gimple_seq_last (*seq_p);
  gimplify_expr (stmt_p, seq_p, NULL, is_gimple_stmt, fb_none);
  return gimple_seq_last (*seq_p) != tail_before;
}
/* Add FIRSTPRIVATE entries for DECL in the OpenMP the surrounding parallels
   to CTX.  If entries already exist, force them to be some flavor of private:
   shared entries are downgraded to firstprivate, mapped entries become
   to-only maps, while an already-private entry stops the outward walk.
   If there is no enclosing parallel, do nothing.  */
void
omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
{
  if (decl == NULL || !DECL_P (decl) || ctx->region_type == ORT_NONE)
    return;
  /* Walk outward through the enclosing contexts.  (CTX is known non-NULL
     on entry; it was just dereferenced above.)  */
  for (; ctx; ctx = ctx->outer_context)
    {
      splay_tree_node n
	= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
      if (n != NULL)
	{
	  if (n->value & GOVD_SHARED)
	    n->value = GOVD_FIRSTPRIVATE | (n->value & GOVD_SEEN);
	  else if (n->value & GOVD_MAP)
	    n->value |= GOVD_MAP_TO_ONLY;
	  else
	    return;
	}
      else if ((ctx->region_type & ORT_TARGET) != 0)
	{
	  if (ctx->defaultmap[GDMK_SCALAR] & GOVD_FIRSTPRIVATE)
	    omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);
	  else
	    omp_add_variable (ctx, decl, GOVD_MAP | GOVD_MAP_TO_ONLY);
	}
      else if (ctx->region_type != ORT_WORKSHARE
	       && ctx->region_type != ORT_TASKGROUP
	       && ctx->region_type != ORT_SIMD
	       && ctx->region_type != ORT_ACC
	       && !(ctx->region_type & ORT_TARGET_DATA))
	omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);
    }
}
/* Similarly for each of the type sizes of TYPE. */
static void
omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
{
if (type == NULL || type == error_mark_node)
return;
type = TYPE_MAIN_VARIANT (type);
if (ctx->privatized_types->add (type))
return;
switch (TREE_CODE (type))
{
case INTEGER_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type));
omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type));
break;
case ARRAY_TYPE:
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type));
break;
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
{
tree field;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
if (TREE_CODE (field) == FIELD_DECL)
{
omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field));
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field));
}
}
break;
case POINTER_TYPE:
case REFERENCE_TYPE:
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
break;
default:
break;
}
omp_firstprivatize_variable (ctx, TYPE_SIZE (type));
omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type));
lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type);
}
/* Add an entry for DECL in the OMP context CTX with FLAGS (a mask of
   GOVD_* bits).  Re-adding with a new data-sharing class merges the
   flags; variable-sized decls additionally get their replacement
   pointer and size expressions recorded.  */
static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
  splay_tree_node n;
  unsigned int nflags;
  tree t;
  if (error_operand_p (decl) || ctx->region_type == ORT_NONE)
    return;
  /* Never elide decls whose type has TREE_ADDRESSABLE set.  This means
     there are constructors involved somewhere.  Exception is a shared clause,
     there is nothing privatized in that case.  */
  if ((flags & GOVD_SHARED) == 0
      && (TREE_ADDRESSABLE (TREE_TYPE (decl))
	  || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl))))
    flags |= GOVD_SEEN;
  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
    {
      /* We shouldn't be re-adding the decl with the same data
	 sharing class.  */
      gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
      nflags = n->value | flags;
      /* The only combination of data sharing classes we should see is
	 FIRSTPRIVATE and LASTPRIVATE.  However, OpenACC permits
	 reduction variables to be used in data sharing clauses.  */
      gcc_assert ((ctx->region_type & ORT_ACC) != 0
		  || ((nflags & GOVD_DATA_SHARE_CLASS)
		      == (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE))
		  || (flags & GOVD_DATA_SHARE_CLASS) == 0);
      n->value = nflags;
      return;
    }
  /* When adding a variable-sized variable, we have to handle all sorts
     of additional bits of data: the pointer replacement variable, and
     the parameters of the type.  */
  if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      /* Add the pointer replacement variable as PRIVATE if the variable
	 replacement is private, else FIRSTPRIVATE since we'll need the
	 address of the original variable either for SHARED, or for the
	 copy into or out of the context.  */
      if (!(flags & GOVD_LOCAL) && ctx->region_type != ORT_TASKGROUP)
	{
	  if (flags & GOVD_MAP)
	    nflags = GOVD_MAP | GOVD_MAP_TO_ONLY | GOVD_EXPLICIT;
	  else if (flags & GOVD_PRIVATE)
	    nflags = GOVD_PRIVATE;
	  else if (((ctx->region_type & (ORT_TARGET | ORT_TARGET_DATA)) != 0
		    && (flags & GOVD_FIRSTPRIVATE))
		   || (ctx->region_type == ORT_TARGET_DATA
		       && (flags & GOVD_DATA_SHARE_CLASS) == 0))
	    nflags = GOVD_PRIVATE | GOVD_EXPLICIT;
	  else
	    nflags = GOVD_FIRSTPRIVATE;
	  nflags |= flags & GOVD_SEEN;
	  /* The decl's value expression must be *ptr; record the
	     pointer variable itself with the flags computed above.  */
	  t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  omp_add_variable (ctx, t, nflags);
	}
      /* Add all of the variable and type parameters (which should have
	 been gimplified to a formal temporary) as FIRSTPRIVATE.  */
      omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
      omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));
      /* The variable-sized variable itself is never SHARED, only some form
	 of PRIVATE.  The sharing would take place via the pointer variable
	 which we remapped above.  */
      if (flags & GOVD_SHARED)
	flags = GOVD_SHARED | GOVD_DEBUG_PRIVATE
		| (flags & (GOVD_SEEN | GOVD_EXPLICIT));
      /* We're going to make use of the TYPE_SIZE_UNIT at least in the
	 alloca statement we generate for the variable, so make sure it
	 is available.  This isn't automatically needed for the SHARED
	 case, since we won't be allocating local storage then.
	 For local variables TYPE_SIZE_UNIT might not be gimplified yet,
	 in this case omp_notice_variable will be called later
	 on when it is gimplified.  */
      else if (! (flags & (GOVD_LOCAL | GOVD_MAP))
	       && DECL_P (TYPE_SIZE_UNIT (TREE_TYPE (decl))))
	omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
    }
  else if ((flags & (GOVD_MAP | GOVD_LOCAL)) == 0
	   && lang_hooks.decls.omp_privatize_by_reference (decl))
    {
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));
      /* Similar to the direct variable sized case above, we'll need the
	 size of references being privatized.  */
      if ((flags & GOVD_SHARED) == 0)
	{
	  t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  if (DECL_P (t))
	    omp_notice_variable (ctx, t, true);
	}
    }
  if (n != NULL)
    n->value |= flags;
  else
    splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);
  /* For reductions clauses in OpenACC loop directives, by default create a
     copy clause on the enclosing parallel construct for carrying back the
     results.  */
  if (ctx->region_type == ORT_ACC && (flags & GOVD_REDUCTION))
    {
      struct gimplify_omp_ctx *outer_ctx = ctx->outer_context;
      while (outer_ctx)
	{
	  n = splay_tree_lookup (outer_ctx->variables, (splay_tree_key)decl);
	  if (n != NULL)
	    {
	      /* Ignore local variables and explicitly declared clauses.  */
	      if (n->value & (GOVD_LOCAL | GOVD_EXPLICIT))
		break;
	      else if (outer_ctx->region_type == ORT_ACC_KERNELS)
		{
		  /* According to the OpenACC spec, such a reduction variable
		     should already have a copy map on a kernels construct,
		     verify that here.  */
		  gcc_assert (!(n->value & GOVD_FIRSTPRIVATE)
			      && (n->value & GOVD_MAP));
		}
	      else if (outer_ctx->region_type == ORT_ACC_PARALLEL)
		{
		  /* Remove firstprivate and make it a copy map.  */
		  n->value &= ~GOVD_FIRSTPRIVATE;
		  n->value |= GOVD_MAP;
		}
	    }
	  else if (outer_ctx->region_type == ORT_ACC_PARALLEL)
	    {
	      splay_tree_insert (outer_ctx->variables, (splay_tree_key)decl,
				 GOVD_MAP | GOVD_SEEN);
	      break;
	    }
	  outer_ctx = outer_ctx->outer_context;
	}
    }
}
/* Notice a threadprivate variable DECL used in OMP context CTX.
   This just prints out diagnostics about threadprivate variable uses
   in untied tasks.  If DECL2 is non-NULL, prevent this warning
   on that variable.  Always returns false (DECL is never remapped).  */
static bool
omp_notice_threadprivate_variable (struct gimplify_omp_ctx *ctx, tree decl,
				   tree decl2)
{
  splay_tree_node n;
  struct gimplify_omp_ctx *octx;
  /* Also diagnose uses inside target and order(concurrent) regions; the
     sentinel splay-tree entry (value 0) ensures each context is diagnosed
     at most once per variable.  */
  for (octx = ctx; octx; octx = octx->outer_context)
    if ((octx->region_type & ORT_TARGET) != 0
	|| octx->order_concurrent)
      {
	n = splay_tree_lookup (octx->variables, (splay_tree_key)decl);
	if (n == NULL)
	  {
	    if (octx->order_concurrent)
	      {
		error ("threadprivate variable %qE used in a region with"
		       " %<order(concurrent)%> clause", DECL_NAME (decl));
		error_at (octx->location, "enclosing region");
	      }
	    else
	      {
		error ("threadprivate variable %qE used in target region",
		       DECL_NAME (decl));
		error_at (octx->location, "enclosing target region");
	      }
	    splay_tree_insert (octx->variables, (splay_tree_key)decl, 0);
	  }
	if (decl2)
	  splay_tree_insert (octx->variables, (splay_tree_key)decl2, 0);
      }
  if (ctx->region_type != ORT_UNTIED_TASK)
    return false;
  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n == NULL)
    {
      error ("threadprivate variable %qE used in untied task",
	     DECL_NAME (decl));
      error_at (ctx->location, "enclosing task");
      splay_tree_insert (ctx->variables, (splay_tree_key)decl, 0);
    }
  if (decl2)
    splay_tree_insert (ctx->variables, (splay_tree_key)decl2, 0);
  return false;
}
/* Return true if global var DECL is device resident, i.e. its
   "oacc declare target" attribute carries a GOMP_MAP_DEVICE_RESIDENT
   clause.  */
static bool
device_resident_p (tree decl)
{
  tree attr = lookup_attribute ("oacc declare target", DECL_ATTRIBUTES (decl));
  if (attr == NULL_TREE)
    return false;
  /* Scan the clause chain hanging off the attribute value.  */
  for (tree t = TREE_VALUE (attr); t; t = TREE_PURPOSE (t))
    if (OMP_CLAUSE_MAP_KIND (TREE_VALUE (t)) == GOMP_MAP_DEVICE_RESIDENT)
      return true;
  return false;
}
/* Return true if DECL has an ACC DECLARE attribute.  For a MEM_REF,
   the attribute is looked up on its base operand instead.  */
static bool
is_oacc_declared (tree decl)
{
  if (TREE_CODE (decl) == MEM_REF)
    decl = TREE_OPERAND (decl, 0);
  return lookup_attribute ("oacc declare target",
			   DECL_ATTRIBUTES (decl)) != NULL_TREE;
}
/* Determine outer default flags for DECL mentioned in an OMP region
   but not declared in an enclosing clause.  Returns FLAGS augmented
   with the implicit data-sharing attribute (GOVD_SHARED, GOVD_PRIVATE
   or GOVD_FIRSTPRIVATE).  IN_CODE is forwarded to omp_notice_variable.
   ??? Some compiler-generated variables (like SAVE_EXPRs) could be
   remapped firstprivate instead of shared.  To some extent this is
   addressed in omp_firstprivatize_type_sizes, but not
   effectively.  */
static unsigned
omp_default_clause (struct gimplify_omp_ctx *ctx, tree decl,
		    bool in_code, unsigned flags)
{
  enum omp_clause_default_kind default_kind = ctx->default_kind;
  enum omp_clause_default_kind kind;
  /* Language-predetermined sharing overrides the region's default
     clause; constant-pool statics are always shared.  */
  kind = lang_hooks.decls.omp_predetermined_sharing (decl);
  if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    default_kind = kind;
  else if (VAR_P (decl) && TREE_STATIC (decl) && DECL_IN_CONSTANT_POOL (decl))
    default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  switch (default_kind)
    {
    case OMP_CLAUSE_DEFAULT_NONE:
      {
	/* default(none): diagnose, naming the enclosing construct, then
	   fall through to treat the decl as shared for error recovery.  */
	const char *rtype;
	if (ctx->region_type & ORT_PARALLEL)
	  rtype = "parallel";
	else if ((ctx->region_type & ORT_TASKLOOP) == ORT_TASKLOOP)
	  rtype = "taskloop";
	else if (ctx->region_type & ORT_TASK)
	  rtype = "task";
	else if (ctx->region_type & ORT_TEAMS)
	  rtype = "teams";
	else
	  gcc_unreachable ();
	error ("%qE not specified in enclosing %qs",
	       DECL_NAME (lang_hooks.decls.omp_report_decl (decl)), rtype);
	error_at (ctx->location, "enclosing %qs", rtype);
      }
      /* FALLTHRU */
    case OMP_CLAUSE_DEFAULT_SHARED:
      flags |= GOVD_SHARED;
      break;
    case OMP_CLAUSE_DEFAULT_PRIVATE:
      flags |= GOVD_PRIVATE;
      break;
    case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
      flags |= GOVD_FIRSTPRIVATE;
      break;
    case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
      /* decl will be either GOVD_FIRSTPRIVATE or GOVD_SHARED.  */
      gcc_assert ((ctx->region_type & ORT_TASK) != 0);
      if (struct gimplify_omp_ctx *octx = ctx->outer_context)
	{
	  /* Walk outward: a non-shared entry for DECL makes it
	     firstprivate; reaching a parallel/teams region without one
	     makes it shared.  */
	  omp_notice_variable (octx, decl, in_code);
	  for (; octx; octx = octx->outer_context)
	    {
	      splay_tree_node n2;
	      n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl);
	      if ((octx->region_type & (ORT_TARGET_DATA | ORT_TARGET)) != 0
		  && (n2 == NULL || (n2->value & GOVD_DATA_SHARE_CLASS) == 0))
		continue;
	      if (n2 && (n2->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED)
		{
		  flags |= GOVD_FIRSTPRIVATE;
		  goto found_outer;
		}
	      if ((octx->region_type & (ORT_PARALLEL | ORT_TEAMS)) != 0)
		{
		  flags |= GOVD_SHARED;
		  goto found_outer;
		}
	    }
	}
      if (TREE_CODE (decl) == PARM_DECL
	  || (!is_global_var (decl)
	      && DECL_CONTEXT (decl) == current_function_decl))
	flags |= GOVD_FIRSTPRIVATE;
      else
	flags |= GOVD_SHARED;
    found_outer:
      break;
    default:
      gcc_unreachable ();
    }
  return flags;
}
/* Determine outer default flags for DECL mentioned in an OACC region
   but not declared in an enclosing clause.  Returns FLAGS augmented
   with the implicit data clause for the construct kind in CTX; also
   emits the default(none) diagnostic when applicable.  */
static unsigned
oacc_default_clause (struct gimplify_omp_ctx *ctx, tree decl, unsigned flags)
{
  const char *rkind;
  bool on_device = false;
  bool is_private = false;
  bool declared = is_oacc_declared (decl);
  tree type = TREE_TYPE (decl);
  if (lang_hooks.decls.omp_privatize_by_reference (decl))
    type = TREE_TYPE (type);
  /* For Fortran COMMON blocks, only used variables in those blocks are
     transfered and remapped.  The block itself will have a private clause to
     avoid transfering the data twice.
     The hook evaluates to false by default.  For a variable in Fortran's COMMON
     or EQUIVALENCE block, returns 'true' (as we have shared=false) - as only
     the variables in such a COMMON/EQUIVALENCE block shall be privatized not
     the whole block.  For C++ and Fortran, it can also be true under certain
     other conditions, if DECL_HAS_VALUE_EXPR.  */
  if (RECORD_OR_UNION_TYPE_P (type))
    is_private = lang_hooks.decls.omp_disregard_value_expr (decl, false);
  /* Device-resident globals only need a to-only mapping.  */
  if ((ctx->region_type & (ORT_ACC_PARALLEL | ORT_ACC_KERNELS)) != 0
      && is_global_var (decl)
      && device_resident_p (decl)
      && !is_private)
    {
      on_device = true;
      flags |= GOVD_MAP_TO_ONLY;
    }
  switch (ctx->region_type)
    {
    case ORT_ACC_KERNELS:
      rkind = "kernels";
      if (is_private)
	flags |= GOVD_FIRSTPRIVATE;
      else if (AGGREGATE_TYPE_P (type))
	{
	  /* Aggregates default to 'present_or_copy', or 'present'.  */
	  if (ctx->default_kind != OMP_CLAUSE_DEFAULT_PRESENT)
	    flags |= GOVD_MAP;
	  else
	    flags |= GOVD_MAP | GOVD_MAP_FORCE_PRESENT;
	}
      else
	/* Scalars default to 'copy'.  */
	flags |= GOVD_MAP | GOVD_MAP_FORCE;
      break;
    case ORT_ACC_PARALLEL:
    case ORT_ACC_SERIAL:
      rkind = ctx->region_type == ORT_ACC_PARALLEL ? "parallel" : "serial";
      if (is_private)
	flags |= GOVD_FIRSTPRIVATE;
      else if (on_device || declared)
	flags |= GOVD_MAP;
      else if (AGGREGATE_TYPE_P (type))
	{
	  /* Aggregates default to 'present_or_copy', or 'present'.  */
	  if (ctx->default_kind != OMP_CLAUSE_DEFAULT_PRESENT)
	    flags |= GOVD_MAP;
	  else
	    flags |= GOVD_MAP | GOVD_MAP_FORCE_PRESENT;
	}
      else
	/* Scalars default to 'firstprivate'.  */
	flags |= GOVD_FIRSTPRIVATE;
      break;
    default:
      gcc_unreachable ();
    }
  if (DECL_ARTIFICIAL (decl))
    ; /* We can get compiler-generated decls, and should not complain
	 about them.  */
  else if (ctx->default_kind == OMP_CLAUSE_DEFAULT_NONE)
    {
      error ("%qE not specified in enclosing OpenACC %qs construct",
	     DECL_NAME (lang_hooks.decls.omp_report_decl (decl)), rkind);
      inform (ctx->location, "enclosing OpenACC %qs construct", rkind);
    }
  else if (ctx->default_kind == OMP_CLAUSE_DEFAULT_PRESENT)
    ; /* Handled above.  */
  else
    gcc_checking_assert (ctx->default_kind == OMP_CLAUSE_DEFAULT_SHARED);
  return flags;
}
/* Record the fact that DECL was used within the OMP context CTX.
   IN_CODE is true when real code uses DECL, and false when we should
   merely emit default(none) errors.  Return true if DECL is going to
   be remapped and thus DECL shouldn't be gimplified into its
   DECL_VALUE_EXPR (if any).  */
static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
  splay_tree_node n;
  unsigned flags = in_code ? GOVD_SEEN : 0;
  bool ret = false, shared;
  if (error_operand_p (decl))
    return false;
  if (ctx->region_type == ORT_NONE)
    return lang_hooks.decls.omp_disregard_value_expr (decl, false);
  if (is_global_var (decl))
    {
      /* Threadprivate variables are predetermined.  */
      if (DECL_THREAD_LOCAL_P (decl))
	return omp_notice_threadprivate_variable (ctx, decl, NULL_TREE);
      if (DECL_HAS_VALUE_EXPR_P (decl))
	{
	  if (ctx->region_type & ORT_ACC)
	    /* For OpenACC, defer expansion of value to avoid transfering
	       privatized common block data instead of im-/explicitly transfered
	       variables which are in common blocks.  */
	    ;
	  else
	    {
	      tree value = get_base_address (DECL_VALUE_EXPR (decl));
	      if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
		return omp_notice_threadprivate_variable (ctx, decl, value);
	    }
	}
      if (gimplify_omp_ctxp->outer_context == NULL
	  && VAR_P (decl)
	  && oacc_get_fn_attrib (current_function_decl))
	{
	  /* Inside an OpenACC 'routine' function, a global variable
	     requires a suitable 'declare' directive.  */
	  location_t loc = DECL_SOURCE_LOCATION (decl);
	  if (lookup_attribute ("omp declare target link",
				DECL_ATTRIBUTES (decl)))
	    {
	      error_at (loc,
			"%qE with %<link%> clause used in %<routine%> function",
			DECL_NAME (decl));
	      return false;
	    }
	  else if (!lookup_attribute ("omp declare target",
				      DECL_ATTRIBUTES (decl)))
	    {
	      error_at (loc,
			"%qE requires a %<declare%> directive for use "
			"in a %<routine%> function", DECL_NAME (decl));
	      return false;
	    }
	}
    }
  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if ((ctx->region_type & ORT_TARGET) != 0)
    {
      if (ctx->region_type & ORT_ACC)
	/* For OpenACC, as remarked above, defer expansion.  */
	shared = false;
      else
	shared = true;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      if (n == NULL)
	{
	  unsigned nflags = flags;
	  if ((ctx->region_type & ORT_ACC) == 0)
	    {
	      bool is_declare_target = false;
	      if (is_global_var (decl)
		  && varpool_node::get_create (decl)->offloadable)
		{
		  /* DECL is "declare target" if no outer context gives it
		     a non-shared data-sharing class.  */
		  struct gimplify_omp_ctx *octx;
		  for (octx = ctx->outer_context;
		       octx; octx = octx->outer_context)
		    {
		      n = splay_tree_lookup (octx->variables,
					     (splay_tree_key)decl);
		      if (n
			  && (n->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED
			  && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
			break;
		    }
		  is_declare_target = octx == NULL;
		}
	      if (!is_declare_target)
		{
		  /* Pick the defaultmap category for DECL: pointer,
		     scalar or aggregate.  */
		  int gdmk;
		  if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
		      || (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
			  && (TREE_CODE (TREE_TYPE (TREE_TYPE (decl)))
			      == POINTER_TYPE)))
		    gdmk = GDMK_POINTER;
		  else if (lang_hooks.decls.omp_scalar_p (decl))
		    gdmk = GDMK_SCALAR;
		  else
		    gdmk = GDMK_AGGREGATE;
		  if (ctx->defaultmap[gdmk] == 0)
		    {
		      tree d = lang_hooks.decls.omp_report_decl (decl);
		      error ("%qE not specified in enclosing %<target%>",
			     DECL_NAME (d));
		      error_at (ctx->location, "enclosing %<target%>");
		    }
		  else if (ctx->defaultmap[gdmk]
			   & (GOVD_MAP_0LEN_ARRAY | GOVD_FIRSTPRIVATE))
		    nflags |= ctx->defaultmap[gdmk];
		  else
		    {
		      gcc_assert (ctx->defaultmap[gdmk] & GOVD_MAP);
		      nflags |= ctx->defaultmap[gdmk] & ~GOVD_MAP;
		    }
		}
	    }
	  struct gimplify_omp_ctx *octx = ctx->outer_context;
	  if ((ctx->region_type & ORT_ACC) && octx)
	    {
	      /* Look in outer OpenACC contexts, to see if there's a
		 data attribute for this variable.  */
	      omp_notice_variable (octx, decl, in_code);
	      for (; octx; octx = octx->outer_context)
		{
		  if (!(octx->region_type & (ORT_TARGET_DATA | ORT_TARGET)))
		    break;
		  splay_tree_node n2
		    = splay_tree_lookup (octx->variables,
					 (splay_tree_key) decl);
		  if (n2)
		    {
		      if (octx->region_type == ORT_ACC_HOST_DATA)
			error ("variable %qE declared in enclosing "
			       "%<host_data%> region", DECL_NAME (decl));
		      nflags |= GOVD_MAP;
		      if (octx->region_type == ORT_ACC_DATA
			  && (n2->value & GOVD_MAP_0LEN_ARRAY))
			nflags |= GOVD_MAP_0LEN_ARRAY;
		      goto found_outer;
		    }
		}
	    }
	  /* If no explicit data attribute applied, fall back to the
	     construct's default clause, checking mappability first.  */
	  if ((nflags & ~(GOVD_MAP_TO_ONLY | GOVD_MAP_FROM_ONLY
			  | GOVD_MAP_ALLOC_ONLY)) == flags)
	    {
	      tree type = TREE_TYPE (decl);
	      if (gimplify_omp_ctxp->target_firstprivatize_array_bases
		  && lang_hooks.decls.omp_privatize_by_reference (decl))
		type = TREE_TYPE (type);
	      if (!lang_hooks.types.omp_mappable_type (type))
		{
		  error ("%qD referenced in target region does not have "
			 "a mappable type", decl);
		  nflags |= GOVD_MAP | GOVD_EXPLICIT;
		}
	      else
		{
		  if ((ctx->region_type & ORT_ACC) != 0)
		    nflags = oacc_default_clause (ctx, decl, flags);
		  else
		    nflags |= GOVD_MAP;
		}
	    }
	found_outer:
	  omp_add_variable (ctx, decl, nflags);
	}
      else
	{
	  /* If nothing changed, there's nothing left to do.  */
	  if ((n->value & flags) == flags)
	    return ret;
	  flags |= n->value;
	  n->value = flags;
	}
      goto do_outer;
    }
  if (n == NULL)
    {
      if (ctx->region_type == ORT_WORKSHARE
	  || ctx->region_type == ORT_TASKGROUP
	  || ctx->region_type == ORT_SIMD
	  || ctx->region_type == ORT_ACC
	  || (ctx->region_type & ORT_TARGET_DATA) != 0)
	goto do_outer;
      flags = omp_default_clause (ctx, decl, in_code, flags);
      if ((flags & GOVD_PRIVATE)
	  && lang_hooks.decls.omp_private_outer_ref (decl))
	flags |= GOVD_PRIVATE_OUTER_REF;
      omp_add_variable (ctx, decl, flags);
      shared = (flags & GOVD_SHARED) != 0;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      goto do_outer;
    }
  /* Don't mark as GOVD_SEEN addressable temporaries seen only in simd
     lb, b or incr expressions, those shouldn't be turned into simd arrays.  */
  if (ctx->region_type == ORT_SIMD
      && ctx->in_for_exprs
      && ((n->value & (GOVD_PRIVATE | GOVD_SEEN | GOVD_EXPLICIT))
	  == GOVD_PRIVATE))
    flags &= ~GOVD_SEEN;
  if ((n->value & (GOVD_SEEN | GOVD_LOCAL)) == 0
      && (flags & (GOVD_SEEN | GOVD_LOCAL)) == GOVD_SEEN
      && DECL_SIZE (decl))
    {
      if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	{
	  /* Variable-sized decl: mark its replacement pointer (from the
	     *ptr value expression) as seen, too.  */
	  splay_tree_node n2;
	  tree t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  n2 = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
	  n2->value |= GOVD_SEEN;
	}
      else if (lang_hooks.decls.omp_privatize_by_reference (decl)
	       && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
	       && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
		   != INTEGER_CST))
	{
	  /* Privatized reference to a variable-sized type: the size of
	     the referenced object is needed as well.  */
	  splay_tree_node n2;
	  tree t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  gcc_assert (DECL_P (t));
	  n2 = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
	  if (n2)
	    omp_notice_variable (ctx, t, true);
	}
    }
  if (ctx->region_type & ORT_ACC)
    /* For OpenACC, as remarked above, defer expansion.  */
    shared = false;
  else
    shared = ((flags | n->value) & GOVD_SHARED) != 0;
  ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
  /* If nothing changed, there's nothing left to do.  */
  if ((n->value & flags) == flags)
    return ret;
  flags |= n->value;
  n->value = flags;
do_outer:
  /* If the variable is private in the current context, then we don't
     need to propagate anything to an outer context.  */
  if ((flags & GOVD_PRIVATE) && !(flags & GOVD_PRIVATE_OUTER_REF))
    return ret;
  if ((flags & (GOVD_LINEAR | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
      == (GOVD_LINEAR | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
    return ret;
  if ((flags & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
		| GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
      == (GOVD_LASTPRIVATE | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
    return ret;
  if (ctx->outer_context
      && omp_notice_variable (ctx->outer_context, decl, in_code))
    return true;
  return ret;
}
/* Verify that DECL is private within CTX.  If there's specific information
   to the contrary in the innermost scope, generate an error.  SIMD
   non-zero indicates a simd context (note the GOVD_LINEAR diagnostic is
   skipped when SIMD == 1).  Returns true when the innermost relevant
   context makes DECL private.  */
static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl, int simd)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      if (n->value & GOVD_SHARED)
	{
	  if (ctx == gimplify_omp_ctxp)
	    {
	      /* Iteration variables of the innermost construct must not
		 be shared; diagnose and force them private.  */
	      if (simd)
		error ("iteration variable %qE is predetermined linear",
		       DECL_NAME (decl));
	      else
		error ("iteration variable %qE should be private",
		       DECL_NAME (decl));
	      n->value = GOVD_PRIVATE;
	      return true;
	    }
	  else
	    return false;
	}
      else if ((n->value & GOVD_EXPLICIT) != 0
	       && (ctx == gimplify_omp_ctxp
		   || (ctx->region_type == ORT_COMBINED_PARALLEL
		       && gimplify_omp_ctxp->outer_context == ctx)))
	{
	  /* Explicit clauses naming an iteration variable may only use
	     certain sharings; diagnose invalid ones.  */
	  if ((n->value & GOVD_FIRSTPRIVATE) != 0)
	    error ("iteration variable %qE should not be firstprivate",
		   DECL_NAME (decl));
	  else if ((n->value & GOVD_REDUCTION) != 0)
	    error ("iteration variable %qE should not be reduction",
		   DECL_NAME (decl));
	  else if (simd != 1 && (n->value & GOVD_LINEAR) != 0)
	    error ("iteration variable %qE should not be linear",
		   DECL_NAME (decl));
	}
      return (ctx == gimplify_omp_ctxp
	      || (ctx->region_type == ORT_COMBINED_PARALLEL
		  && gimplify_omp_ctxp->outer_context == ctx));
    }
  /* Not recorded here: recurse outward through workshare/taskgroup/
     simd/acc regions only.  */
  if (ctx->region_type != ORT_WORKSHARE
      && ctx->region_type != ORT_TASKGROUP
      && ctx->region_type != ORT_SIMD
      && ctx->region_type != ORT_ACC)
    return false;
  else if (ctx->outer_context)
    return omp_is_private (ctx->outer_context, decl, simd);
  return false;
}
/* Return true if DECL is private within a parallel region
   that binds to the current construct's context or in parallel
   region's REDUCTION clause.  COPYPRIVATE biases the answer for
   decls whose privacy cannot be determined (see below).  */
static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl, bool copyprivate)
{
  splay_tree_node n;
  do
    {
      ctx = ctx->outer_context;
      if (ctx == NULL)
	{
	  if (is_global_var (decl))
	    return false;
	  /* References might be private, but might be shared too,
	     when checking for copyprivate, assume they might be
	     private, otherwise assume they might be shared.  */
	  if (copyprivate)
	    return true;
	  if (lang_hooks.decls.omp_privatize_by_reference (decl))
	    return false;
	  /* Treat C++ privatized non-static data members outside
	     of the privatization the same.  */
	  if (omp_member_access_dummy_var (decl))
	    return false;
	  return true;
	}
      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
      /* Skip target contexts that record no data-sharing class for
	 DECL; note this 'continue' re-tests the loop condition.  */
      if ((ctx->region_type & (ORT_TARGET | ORT_TARGET_DATA)) != 0
	  && (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0))
	continue;
      if (n != NULL)
	{
	  if ((n->value & GOVD_LOCAL) != 0
	      && omp_member_access_dummy_var (decl))
	    return false;
	  return (n->value & GOVD_SHARED) == 0;
	}
    }
  while (ctx->region_type == ORT_WORKSHARE
	 || ctx->region_type == ORT_TASKGROUP
	 || ctx->region_type == ORT_SIMD
	 || ctx->region_type == ORT_ACC);
  return false;
}
/* Callback for walk_tree to find a DECL_EXPR for the given DECL (passed
   as DATA).  Returns the DECL_EXPR node to stop the walk when found,
   NULL_TREE otherwise.  */
static tree
find_decl_expr (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  /* Found the DECL_EXPR for DATA: return it, which terminates the walk.
     (The previous comment about unmarking visited nodes was stale.)  */
  if (TREE_CODE (t) == DECL_EXPR && DECL_EXPR_DECL (t) == (tree) data)
    return t;
  /* Don't descend into types or nested declarations.  */
  if (IS_TYPE_OR_DECL_P (t))
    *walk_subtrees = 0;
  return NULL_TREE;
}
/* If *LIST_P contains any OpenMP depend clauses with iterators,
lower all the depend clauses by populating corresponding depend
array. Returns 0 if there are no such depend clauses, or
2 if all depend clauses should be removed, 1 otherwise. */
static int
gimplify_omp_depend (tree *list_p, gimple_seq *pre_p)
{
tree c;
gimple *g;
size_t n[4] = { 0, 0, 0, 0 };
bool unused[4];
tree counts[4] = { NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE };
tree last_iter = NULL_TREE, last_count = NULL_TREE;
size_t i, j;
location_t first_loc = UNKNOWN_LOCATION;
for (c = *list_p; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
{
switch (OMP_CLAUSE_DEPEND_KIND (c))
{
case OMP_CLAUSE_DEPEND_IN:
i = 2;
break;
case OMP_CLAUSE_DEPEND_OUT:
case OMP_CLAUSE_DEPEND_INOUT:
i = 0;
break;
case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
i = 1;
break;
case OMP_CLAUSE_DEPEND_DEPOBJ:
i = 3;
break;
case OMP_CLAUSE_DEPEND_SOURCE:
case OMP_CLAUSE_DEPEND_SINK:
continue;
default:
gcc_unreachable ();
}
tree t = OMP_CLAUSE_DECL (c);
if (first_loc == UNKNOWN_LOCATION)
first_loc = OMP_CLAUSE_LOCATION (c);
if (TREE_CODE (t) == TREE_LIST
&& TREE_PURPOSE (t)
&& TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
{
if (TREE_PURPOSE (t) != last_iter)
{
tree tcnt = size_one_node;
for (tree it = TREE_PURPOSE (t); it; it = TREE_CHAIN (it))
{
if (gimplify_expr (&TREE_VEC_ELT (it, 1), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR
|| gimplify_expr (&TREE_VEC_ELT (it, 2), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR
|| gimplify_expr (&TREE_VEC_ELT (it, 3), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR
|| (gimplify_expr (&TREE_VEC_ELT (it, 4), pre_p, NULL,
is_gimple_val, fb_rvalue)
== GS_ERROR))
return 2;
tree var = TREE_VEC_ELT (it, 0);
tree begin = TREE_VEC_ELT (it, 1);
tree end = TREE_VEC_ELT (it, 2);
tree step = TREE_VEC_ELT (it, 3);
tree orig_step = TREE_VEC_ELT (it, 4);
tree type = TREE_TYPE (var);
tree stype = TREE_TYPE (step);
location_t loc = DECL_SOURCE_LOCATION (var);
tree endmbegin;
/* Compute count for this iterator as
orig_step > 0
? (begin < end ? (end - begin + (step - 1)) / step : 0)
: (begin > end ? (end - begin + (step + 1)) / step : 0)
and compute product of those for the entire depend
clause. */
if (POINTER_TYPE_P (type))
endmbegin = fold_build2_loc (loc, POINTER_DIFF_EXPR,
stype, end, begin);
else
endmbegin = fold_build2_loc (loc, MINUS_EXPR, type,
end, begin);
tree stepm1 = fold_build2_loc (loc, MINUS_EXPR, stype,
step,
build_int_cst (stype, 1));
tree stepp1 = fold_build2_loc (loc, PLUS_EXPR, stype, step,
build_int_cst (stype, 1));
tree pos = fold_build2_loc (loc, PLUS_EXPR, stype,
unshare_expr (endmbegin),
stepm1);
pos = fold_build2_loc (loc, TRUNC_DIV_EXPR, stype,
pos, step);
tree neg = fold_build2_loc (loc, PLUS_EXPR, stype,
endmbegin, stepp1);
if (TYPE_UNSIGNED (stype))
{
neg = fold_build1_loc (loc, NEGATE_EXPR, stype, neg);
step = fold_build1_loc (loc, NEGATE_EXPR, stype, step);
}
neg = fold_build2_loc (loc, TRUNC_DIV_EXPR, stype,
neg, step);
step = NULL_TREE;
tree cond = fold_build2_loc (loc, LT_EXPR,
boolean_type_node,
begin, end);
pos = fold_build3_loc (loc, COND_EXPR, stype, cond, pos,
build_int_cst (stype, 0));
cond = fold_build2_loc (loc, LT_EXPR, boolean_type_node,
end, begin);
neg = fold_build3_loc (loc, COND_EXPR, stype, cond, neg,
build_int_cst (stype, 0));
tree osteptype = TREE_TYPE (orig_step);
cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node,
orig_step,
build_int_cst (osteptype, 0));
tree cnt = fold_build3_loc (loc, COND_EXPR, stype,
cond, pos, neg);
cnt = fold_convert_loc (loc, sizetype, cnt);
if (gimplify_expr (&cnt, pre_p, NULL, is_gimple_val,
fb_rvalue) == GS_ERROR)
return 2;
tcnt = size_binop_loc (loc, MULT_EXPR, tcnt, cnt);
}
if (gimplify_expr (&tcnt, pre_p, NULL, is_gimple_val,
fb_rvalue) == GS_ERROR)
return 2;
last_iter = TREE_PURPOSE (t);
last_count = tcnt;
}
if (counts[i] == NULL_TREE)
counts[i] = last_count;
else
counts[i] = size_binop_loc (OMP_CLAUSE_LOCATION (c),
PLUS_EXPR, counts[i], last_count);
}
else
n[i]++;
}
for (i = 0; i < 4; i++)
if (counts[i])
break;
if (i == 4)
return 0;
tree total = size_zero_node;
for (i = 0; i < 4; i++)
{
unused[i] = counts[i] == NULL_TREE && n[i] == 0;
if (counts[i] == NULL_TREE)
counts[i] = size_zero_node;
if (n[i])
counts[i] = size_binop (PLUS_EXPR, counts[i], size_int (n[i]));
if (gimplify_expr (&counts[i], pre_p, NULL, is_gimple_val,
fb_rvalue) == GS_ERROR)
return 2;
total = size_binop (PLUS_EXPR, total, counts[i]);
}
if (gimplify_expr (&total, pre_p, NULL, is_gimple_val, fb_rvalue)
== GS_ERROR)
return 2;
bool is_old = unused[1] && unused[3];
tree totalpx = size_binop (PLUS_EXPR, unshare_expr (total),
size_int (is_old ? 1 : 4));
tree type = build_array_type (ptr_type_node, build_index_type (totalpx));
tree array = create_tmp_var_raw (type);
TREE_ADDRESSABLE (array) = 1;
if (!poly_int_tree_p (totalpx))
{
if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (array)))
gimplify_type_sizes (TREE_TYPE (array), pre_p);
if (gimplify_omp_ctxp)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
while (ctx
&& (ctx->region_type == ORT_WORKSHARE
|| ctx->region_type == ORT_TASKGROUP
|| ctx->region_type == ORT_SIMD
|| ctx->region_type == ORT_ACC))
ctx = ctx->outer_context;
if (ctx)
omp_add_variable (ctx, array, GOVD_LOCAL | GOVD_SEEN);
}
gimplify_vla_decl (array, pre_p);
}
else
gimple_add_tmp_var (array);
tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
NULL_TREE);
tree tem;
if (!is_old)
{
tem = build2 (MODIFY_EXPR, void_type_node, r,
build_int_cst (ptr_type_node, 0));
gimplify_and_add (tem, pre_p);
r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
NULL_TREE);
}
tem = build2 (MODIFY_EXPR, void_type_node, r,
fold_convert (ptr_type_node, total));
gimplify_and_add (tem, pre_p);
for (i = 1; i < (is_old ? 2 : 4); i++)
{
r = build4 (ARRAY_REF, ptr_type_node, array, size_int (i + !is_old),
NULL_TREE, NULL_TREE);
tem = build2 (MODIFY_EXPR, void_type_node, r, counts[i - 1]);
gimplify_and_add (tem, pre_p);
}
tree cnts[4];
for (j = 4; j; j--)
if (!unused[j - 1])
break;
for (i = 0; i < 4; i++)
{
if (i && (i >= j || unused[i - 1]))
{
cnts[i] = cnts[i - 1];
continue;
}
cnts[i] = create_tmp_var (sizetype);
if (i == 0)
g = gimple_build_assign (cnts[i], size_int (is_old ? 2 : 5));
else
{
tree t;
if (is_old)
t = size_binop (PLUS_EXPR, counts[0], size_int (2));
else
t = size_binop (PLUS_EXPR, cnts[i - 1], counts[i - 1]);
if (gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue)
== GS_ERROR)
return 2;
g = gimple_build_assign (cnts[i], t);
}
gimple_seq_add_stmt (pre_p, g);
}
last_iter = NULL_TREE;
tree last_bind = NULL_TREE;
tree *last_body = NULL;
for (c = *list_p; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
{
switch (OMP_CLAUSE_DEPEND_KIND (c))
{
case OMP_CLAUSE_DEPEND_IN:
i = 2;
break;
case OMP_CLAUSE_DEPEND_OUT:
case OMP_CLAUSE_DEPEND_INOUT:
i = 0;
break;
case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
i = 1;
break;
case OMP_CLAUSE_DEPEND_DEPOBJ:
i = 3;
break;
case OMP_CLAUSE_DEPEND_SOURCE:
case OMP_CLAUSE_DEPEND_SINK:
continue;
default:
gcc_unreachable ();
}
tree t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST
&& TREE_PURPOSE (t)
&& TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
{
if (TREE_PURPOSE (t) != last_iter)
{
if (last_bind)
gimplify_and_add (last_bind, pre_p);
tree block = TREE_VEC_ELT (TREE_PURPOSE (t), 5);
last_bind = build3 (BIND_EXPR, void_type_node,
BLOCK_VARS (block), NULL, block);
TREE_SIDE_EFFECTS (last_bind) = 1;
SET_EXPR_LOCATION (last_bind, OMP_CLAUSE_LOCATION (c));
tree *p = &BIND_EXPR_BODY (last_bind);
for (tree it = TREE_PURPOSE (t); it; it = TREE_CHAIN (it))
{
tree var = TREE_VEC_ELT (it, 0);
tree begin = TREE_VEC_ELT (it, 1);
tree end = TREE_VEC_ELT (it, 2);
tree step = TREE_VEC_ELT (it, 3);
tree orig_step = TREE_VEC_ELT (it, 4);
tree type = TREE_TYPE (var);
location_t loc = DECL_SOURCE_LOCATION (var);
/* Emit:
var = begin;
goto cond_label;
beg_label:
...
var = var + step;
cond_label:
if (orig_step > 0) {
if (var < end) goto beg_label;
} else {
if (var > end) goto beg_label;
}
for each iterator, with inner iterators added to
the ... above. */
tree beg_label = create_artificial_label (loc);
tree cond_label = NULL_TREE;
tem = build2_loc (loc, MODIFY_EXPR, void_type_node,
var, begin);
append_to_statement_list_force (tem, p);
tem = build_and_jump (&cond_label);
append_to_statement_list_force (tem, p);
tem = build1 (LABEL_EXPR, void_type_node, beg_label);
append_to_statement_list (tem, p);
tree bind = build3 (BIND_EXPR, void_type_node, NULL_TREE,
NULL_TREE, NULL_TREE);
TREE_SIDE_EFFECTS (bind) = 1;
SET_EXPR_LOCATION (bind, loc);
append_to_statement_list_force (bind, p);
if (POINTER_TYPE_P (type))
tem = build2_loc (loc, POINTER_PLUS_EXPR, type,
var, fold_convert_loc (loc, sizetype,
step));
else
tem = build2_loc (loc, PLUS_EXPR, type, var, step);
tem = build2_loc (loc, MODIFY_EXPR, void_type_node,
var, tem);
append_to_statement_list_force (tem, p);
tem = build1 (LABEL_EXPR, void_type_node, cond_label);
append_to_statement_list (tem, p);
tree cond = fold_build2_loc (loc, LT_EXPR,
boolean_type_node,
var, end);
tree pos
= fold_build3_loc (loc, COND_EXPR, void_type_node,
cond, build_and_jump (&beg_label),
void_node);
cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node,
var, end);
tree neg
= fold_build3_loc (loc, COND_EXPR, void_type_node,
cond, build_and_jump (&beg_label),
void_node);
tree osteptype = TREE_TYPE (orig_step);
cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node,
orig_step,
build_int_cst (osteptype, 0));
tem = fold_build3_loc (loc, COND_EXPR, void_type_node,
cond, pos, neg);
append_to_statement_list_force (tem, p);
p = &BIND_EXPR_BODY (bind);
}
last_body = p;
}
last_iter = TREE_PURPOSE (t);
if (TREE_CODE (TREE_VALUE (t)) == COMPOUND_EXPR)
{
append_to_statement_list (TREE_OPERAND (TREE_VALUE (t),
0), last_body);
TREE_VALUE (t) = TREE_OPERAND (TREE_VALUE (t), 1);
}
if (error_operand_p (TREE_VALUE (t)))
return 2;
TREE_VALUE (t) = build_fold_addr_expr (TREE_VALUE (t));
r = build4 (ARRAY_REF, ptr_type_node, array, cnts[i],
NULL_TREE, NULL_TREE);
tem = build2_loc (OMP_CLAUSE_LOCATION (c), MODIFY_EXPR,
void_type_node, r, TREE_VALUE (t));
append_to_statement_list_force (tem, last_body);
tem = build2_loc (OMP_CLAUSE_LOCATION (c), MODIFY_EXPR,
void_type_node, cnts[i],
size_binop (PLUS_EXPR, cnts[i], size_int (1)));
append_to_statement_list_force (tem, last_body);
TREE_VALUE (t) = null_pointer_node;
}
else
{
if (last_bind)
{
gimplify_and_add (last_bind, pre_p);
last_bind = NULL_TREE;
}
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPOUND_EXPR)
{
gimplify_expr (&TREE_OPERAND (OMP_CLAUSE_DECL (c), 0), pre_p,
NULL, is_gimple_val, fb_rvalue);
OMP_CLAUSE_DECL (c) = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
}
if (error_operand_p (OMP_CLAUSE_DECL (c)))
return 2;
OMP_CLAUSE_DECL (c) = build_fold_addr_expr (OMP_CLAUSE_DECL (c));
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
return 2;
r = build4 (ARRAY_REF, ptr_type_node, array, cnts[i],
NULL_TREE, NULL_TREE);
tem = build2 (MODIFY_EXPR, void_type_node, r, OMP_CLAUSE_DECL (c));
gimplify_and_add (tem, pre_p);
g = gimple_build_assign (cnts[i], size_binop (PLUS_EXPR, cnts[i],
size_int (1)));
gimple_seq_add_stmt (pre_p, g);
}
}
if (last_bind)
gimplify_and_add (last_bind, pre_p);
tree cond = boolean_false_node;
if (is_old)
{
if (!unused[0])
cond = build2_loc (first_loc, NE_EXPR, boolean_type_node, cnts[0],
size_binop_loc (first_loc, PLUS_EXPR, counts[0],
size_int (2)));
if (!unused[2])
cond = build2_loc (first_loc, TRUTH_OR_EXPR, boolean_type_node, cond,
build2_loc (first_loc, NE_EXPR, boolean_type_node,
cnts[2],
size_binop_loc (first_loc, PLUS_EXPR,
totalpx,
size_int (1))));
}
else
{
tree prev = size_int (5);
for (i = 0; i < 4; i++)
{
if (unused[i])
continue;
prev = size_binop_loc (first_loc, PLUS_EXPR, counts[i], prev);
cond = build2_loc (first_loc, TRUTH_OR_EXPR, boolean_type_node, cond,
build2_loc (first_loc, NE_EXPR, boolean_type_node,
cnts[i], unshare_expr (prev)));
}
}
tem = build3_loc (first_loc, COND_EXPR, void_type_node, cond,
build_call_expr_loc (first_loc,
builtin_decl_explicit (BUILT_IN_TRAP),
0), void_node);
gimplify_and_add (tem, pre_p);
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
OMP_CLAUSE_DEPEND_KIND (c) = OMP_CLAUSE_DEPEND_LAST;
OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
OMP_CLAUSE_CHAIN (c) = *list_p;
*list_p = c;
return 1;
}
/* Insert a GOMP_MAP_ALLOC or GOMP_MAP_RELEASE node following a
GOMP_MAP_STRUCT mapping. C is an always_pointer mapping. STRUCT_NODE is
the struct node to insert the new mapping after (when the struct node is
initially created). PREV_NODE is the first of two or three mappings for a
pointer, and is either:
- the node before C, when a pair of mappings is used, e.g. for a C/C++
array section.
- not the node before C. This is true when we have a reference-to-pointer
type (with a mapping for the reference and for the pointer), or for
Fortran derived-type mappings with a GOMP_MAP_TO_PSET.
If SCP is non-null, the new node is inserted before *SCP.
If SCP is null, the new node is inserted before PREV_NODE.
The return value is:
- PREV_NODE, if SCP is non-null.
- The newly-created ALLOC or RELEASE node, if SCP is null.
- The second newly-created ALLOC or RELEASE node, if we are mapping a
reference to a pointer. */
static tree
insert_struct_comp_map (enum tree_code code, tree c, tree struct_node,
                        tree prev_node, tree *scp)
{
  /* Exit-data constructs release the mapping; everything else allocates.  */
  enum gomp_map_kind mkind
    = (code == OMP_TARGET_EXIT_DATA || code == OACC_EXIT_DATA)
      ? GOMP_MAP_RELEASE : GOMP_MAP_ALLOC;
  /* C2 is the new ALLOC/RELEASE node, mirroring C's decl and location.  */
  tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
  /* CL is the return value: PREV_NODE when SCP was supplied, otherwise the
     newly-created node (possibly replaced by C3 below).  */
  tree cl = scp ? prev_node : c2;
  OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
  OMP_CLAUSE_DECL (c2) = unshare_expr (OMP_CLAUSE_DECL (c));
  /* Splice C2 in before *SCP when given, else before PREV_NODE.  */
  OMP_CLAUSE_CHAIN (c2) = scp ? *scp : prev_node;
  /* When the node following PREV_NODE (and preceding C) is a
     GOMP_MAP_TO_PSET mapping (Fortran descriptor), reuse its size;
     otherwise the new node covers a single pointer.  */
  if (OMP_CLAUSE_CHAIN (prev_node) != c
      && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (prev_node)) == OMP_CLAUSE_MAP
      && (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
          == GOMP_MAP_TO_PSET))
    OMP_CLAUSE_SIZE (c2) = OMP_CLAUSE_SIZE (OMP_CLAUSE_CHAIN (prev_node));
  else
    OMP_CLAUSE_SIZE (c2) = TYPE_SIZE_UNIT (ptr_type_node);
  /* Hook the new node in directly after the struct node when the caller
     passed one (i.e. when the GOMP_MAP_STRUCT was just created).  */
  if (struct_node)
    OMP_CLAUSE_CHAIN (struct_node) = c2;
  /* We might need to create an additional mapping if we have a reference to a
     pointer (in C++).  Don't do this if we have something other than a
     GOMP_MAP_ALWAYS_POINTER though, i.e. a GOMP_MAP_TO_PSET.  */
  if (OMP_CLAUSE_CHAIN (prev_node) != c
      && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (prev_node)) == OMP_CLAUSE_MAP
      && ((OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
           == GOMP_MAP_ALWAYS_POINTER)
          || (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
              == GOMP_MAP_ATTACH_DETACH)))
    {
      /* C4 is the mapping of the pointer behind the reference; C3 is a
         second ALLOC/RELEASE node for that pointer, chained in before
         PREV_NODE.  */
      tree c4 = OMP_CLAUSE_CHAIN (prev_node);
      tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
      OMP_CLAUSE_SET_MAP_KIND (c3, mkind);
      OMP_CLAUSE_DECL (c3) = unshare_expr (OMP_CLAUSE_DECL (c4));
      OMP_CLAUSE_SIZE (c3) = TYPE_SIZE_UNIT (ptr_type_node);
      OMP_CLAUSE_CHAIN (c3) = prev_node;
      if (!scp)
        OMP_CLAUSE_CHAIN (c2) = c3;
      else
        cl = c3;
    }
  /* Tell the caller where the primary new node went so later insertions
     can be placed before it.  */
  if (scp)
    *scp = c2;
  return cl;
}
/* Strip ARRAY_REFs or an indirect ref off BASE, find the containing object,
   and set *BITPOSP to the bit offset and *POFFSETP to the byte offset of the
   access.
If BASE_REF is non-NULL and the containing object is a reference, set
*BASE_REF to that reference before dereferencing the object.
If BASE_REF is NULL, check that the containing object is a COMPONENT_REF or
has array type, else return NULL. */
static tree
extract_base_bit_offset (tree base, tree *base_ref, poly_int64 *bitposp,
                         poly_offset_int *poffsetp)
{
  tree offset;
  poly_int64 bitsize, bitpos;
  machine_mode mode;
  int unsignedp, reversep, volatilep = 0;
  poly_offset_int poffset;
  if (base_ref)
    {
      *base_ref = NULL_TREE;
      /* Strip all ARRAY_REFs and one INDIRECT_REF so get_inner_reference
         below starts from the underlying object.  */
      while (TREE_CODE (base) == ARRAY_REF)
        base = TREE_OPERAND (base, 0);
      if (TREE_CODE (base) == INDIRECT_REF)
        base = TREE_OPERAND (base, 0);
    }
  else
    {
      /* Without BASE_REF, only accept an access rooted in a COMPONENT_REF
         of array type...  */
      if (TREE_CODE (base) == ARRAY_REF)
        {
          while (TREE_CODE (base) == ARRAY_REF)
            base = TREE_OPERAND (base, 0);
          if (TREE_CODE (base) != COMPONENT_REF
              || TREE_CODE (TREE_TYPE (base)) != ARRAY_TYPE)
            return NULL_TREE;
        }
      /* ...or a dereference of a reference-typed COMPONENT_REF (C++).  */
      else if (TREE_CODE (base) == INDIRECT_REF
               && TREE_CODE (TREE_OPERAND (base, 0)) == COMPONENT_REF
               && (TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0)))
                   == REFERENCE_TYPE))
        base = TREE_OPERAND (base, 0);
    }
  /* Find the ultimate containing object and the position of the access
     within it.  */
  base = get_inner_reference (base, &bitsize, &bitpos, &offset, &mode,
                              &unsignedp, &reversep, &volatilep);
  tree orig_base = base;
  /* Look through a dereference of a REFERENCE_TYPE decl to the decl
     itself; ORIG_BASE remembers the dereferenced form.  */
  if ((TREE_CODE (base) == INDIRECT_REF
       || (TREE_CODE (base) == MEM_REF
           && integer_zerop (TREE_OPERAND (base, 1))))
      && DECL_P (TREE_OPERAND (base, 0))
      && TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0))) == REFERENCE_TYPE)
    base = TREE_OPERAND (base, 0);
  /* Any variable part of the offset must still be a (poly-)constant.  */
  gcc_assert (offset == NULL_TREE || poly_int_tree_p (offset));
  if (offset)
    poffset = wi::to_poly_offset (offset);
  else
    poffset = 0;
  /* Fold the whole-byte part of the bit position into the byte offset;
     note *BITPOSP still receives the full bit position.  */
  if (maybe_ne (bitpos, 0))
    poffset += bits_to_bytes_round_down (bitpos);
  *bitposp = bitpos;
  *poffsetp = poffset;
  /* Set *BASE_REF if BASE was a dereferenced reference variable.  */
  if (base_ref && orig_base != base)
    *base_ref = orig_base;
  return base;
}
/* Scan the OMP clauses in *LIST_P, installing mappings into a new
omp context and noticing variables in enclosing omp contexts.  */
static void
gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
enum omp_region_type region_type,
enum tree_code code)
{
struct gimplify_omp_ctx *ctx, *outer_ctx;
tree c;
hash_map<tree, tree> *struct_map_to_clause = NULL;
hash_set<tree> *struct_deref_set = NULL;
tree *prev_list_p = NULL, *orig_list_p = list_p;
int handled_depend_iterators = -1;
int nowait = -1;
ctx = new_omp_context (region_type);
ctx->code = code;
outer_ctx = ctx->outer_context;
if (code == OMP_TARGET)
{
if (!lang_GNU_Fortran ())
ctx->defaultmap[GDMK_POINTER] = GOVD_MAP | GOVD_MAP_0LEN_ARRAY;
ctx->defaultmap[GDMK_SCALAR] = GOVD_FIRSTPRIVATE;
}
if (!lang_GNU_Fortran ())
switch (code)
{
case OMP_TARGET:
case OMP_TARGET_DATA:
case OMP_TARGET_ENTER_DATA:
case OMP_TARGET_EXIT_DATA:
case OACC_DECLARE:
case OACC_HOST_DATA:
case OACC_PARALLEL:
case OACC_KERNELS:
ctx->target_firstprivatize_array_bases = true;
default:
break;
}
while ((c = *list_p) != NULL)
{
bool remove = false;
bool notice_outer = true;
const char *check_non_private = NULL;
unsigned int flags;
tree decl;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
flags = GOVD_PRIVATE | GOVD_EXPLICIT;
if (lang_hooks.decls.omp_private_outer_ref (OMP_CLAUSE_DECL (c)))
{
flags |= GOVD_PRIVATE_OUTER_REF;
OMP_CLAUSE_PRIVATE_OUTER_REF (c) = 1;
}
else
notice_outer = false;
goto do_add;
case OMP_CLAUSE_SHARED:
flags = GOVD_SHARED | GOVD_EXPLICIT;
goto do_add;
case OMP_CLAUSE_FIRSTPRIVATE:
flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
check_non_private = "firstprivate";
goto do_add;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
switch (code)
{
case OMP_DISTRIBUTE:
error_at (OMP_CLAUSE_LOCATION (c),
"conditional %<lastprivate%> clause on "
"%qs construct", "distribute");
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = 0;
break;
case OMP_TASKLOOP:
error_at (OMP_CLAUSE_LOCATION (c),
"conditional %<lastprivate%> clause on "
"%qs construct", "taskloop");
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = 0;
break;
default:
break;
}
flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
if (code != OMP_LOOP)
check_non_private = "lastprivate";
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
goto do_add;
if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
&& !lang_hooks.decls.omp_scalar_p (decl))
{
error_at (OMP_CLAUSE_LOCATION (c),
"non-scalar variable %qD in conditional "
"%<lastprivate%> clause", decl);
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = 0;
}
if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
flags |= GOVD_LASTPRIVATE_CONDITIONAL;
if (outer_ctx
&& (outer_ctx->region_type == ORT_COMBINED_PARALLEL
|| ((outer_ctx->region_type & ORT_COMBINED_TEAMS)
== ORT_COMBINED_TEAMS))
&& splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl) == NULL)
{
omp_add_variable (outer_ctx, decl, GOVD_SHARED | GOVD_SEEN);
if (outer_ctx->outer_context)
omp_notice_variable (outer_ctx->outer_context, decl, true);
}
else if (outer_ctx
&& (outer_ctx->region_type & ORT_TASK) != 0
&& outer_ctx->combined_loop
&& splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl) == NULL)
{
omp_add_variable (outer_ctx, decl, GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer_ctx->outer_context)
omp_notice_variable (outer_ctx->outer_context, decl, true);
}
else if (outer_ctx
&& (outer_ctx->region_type == ORT_WORKSHARE
|| outer_ctx->region_type == ORT_ACC)
&& outer_ctx->combined_loop
&& splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl) == NULL
&& !omp_check_private (outer_ctx, decl, false))
{
omp_add_variable (outer_ctx, decl, GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer_ctx->outer_context
&& (outer_ctx->outer_context->region_type
== ORT_COMBINED_PARALLEL)
&& splay_tree_lookup (outer_ctx->outer_context->variables,
(splay_tree_key) decl) == NULL)
{
struct gimplify_omp_ctx *octx = outer_ctx->outer_context;
omp_add_variable (octx, decl, GOVD_SHARED | GOVD_SEEN);
if (octx->outer_context)
{
octx = octx->outer_context;
if (octx->region_type == ORT_WORKSHARE
&& octx->combined_loop
&& splay_tree_lookup (octx->variables,
(splay_tree_key) decl) == NULL
&& !omp_check_private (octx, decl, false))
{
omp_add_variable (octx, decl,
GOVD_LASTPRIVATE | GOVD_SEEN);
octx = octx->outer_context;
if (octx
&& ((octx->region_type & ORT_COMBINED_TEAMS)
== ORT_COMBINED_TEAMS)
&& (splay_tree_lookup (octx->variables,
(splay_tree_key) decl)
== NULL))
{
omp_add_variable (octx, decl,
GOVD_SHARED | GOVD_SEEN);
octx = octx->outer_context;
}
}
if (octx)
omp_notice_variable (octx, decl, true);
}
}
else if (outer_ctx->outer_context)
omp_notice_variable (outer_ctx->outer_context, decl, true);
}
goto do_add;
case OMP_CLAUSE_REDUCTION:
if (OMP_CLAUSE_REDUCTION_TASK (c))
{
if (region_type == ORT_WORKSHARE)
{
if (nowait == -1)
nowait = omp_find_clause (*list_p,
OMP_CLAUSE_NOWAIT) != NULL_TREE;
if (nowait
&& (outer_ctx == NULL
|| outer_ctx->region_type != ORT_COMBINED_PARALLEL))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<task%> reduction modifier on a construct "
"with a %<nowait%> clause");
OMP_CLAUSE_REDUCTION_TASK (c) = 0;
}
}
else if ((region_type & ORT_PARALLEL) != ORT_PARALLEL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"invalid %<task%> reduction modifier on construct "
"other than %<parallel%>, %<for%> or %<sections%>");
OMP_CLAUSE_REDUCTION_TASK (c) = 0;
}
}
if (OMP_CLAUSE_REDUCTION_INSCAN (c))
switch (code)
{
case OMP_SECTIONS:
error_at (OMP_CLAUSE_LOCATION (c),
"%<inscan%> %<reduction%> clause on "
"%qs construct", "sections");
OMP_CLAUSE_REDUCTION_INSCAN (c) = 0;
break;
case OMP_PARALLEL:
error_at (OMP_CLAUSE_LOCATION (c),
"%<inscan%> %<reduction%> clause on "
"%qs construct", "parallel");
OMP_CLAUSE_REDUCTION_INSCAN (c) = 0;
break;
case OMP_TEAMS:
error_at (OMP_CLAUSE_LOCATION (c),
"%<inscan%> %<reduction%> clause on "
"%qs construct", "teams");
OMP_CLAUSE_REDUCTION_INSCAN (c) = 0;
break;
case OMP_TASKLOOP:
error_at (OMP_CLAUSE_LOCATION (c),
"%<inscan%> %<reduction%> clause on "
"%qs construct", "taskloop");
OMP_CLAUSE_REDUCTION_INSCAN (c) = 0;
break;
default:
break;
}
/* FALLTHRU */
case OMP_CLAUSE_IN_REDUCTION:
case OMP_CLAUSE_TASK_REDUCTION:
flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
/* OpenACC permits reductions on private variables. */
if (!(region_type & ORT_ACC)
/* taskgroup is actually not a worksharing region. */
&& code != OMP_TASKGROUP)
check_non_private = omp_clause_code_name[OMP_CLAUSE_CODE (c)];
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) == MEM_REF)
{
tree type = TREE_TYPE (decl);
bool saved_into_ssa = gimplify_ctxp->into_ssa;
gimplify_ctxp->into_ssa = false;
if (gimplify_expr (&TYPE_MAX_VALUE (TYPE_DOMAIN (type)), pre_p,
NULL, is_gimple_val, fb_rvalue, false)
== GS_ERROR)
{
gimplify_ctxp->into_ssa = saved_into_ssa;
remove = true;
break;
}
gimplify_ctxp->into_ssa = saved_into_ssa;
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
if (DECL_P (v))
{
omp_firstprivatize_variable (ctx, v);
omp_notice_variable (ctx, v, true);
}
decl = TREE_OPERAND (decl, 0);
if (TREE_CODE (decl) == POINTER_PLUS_EXPR)
{
gimplify_ctxp->into_ssa = false;
if (gimplify_expr (&TREE_OPERAND (decl, 1), pre_p,
NULL, is_gimple_val, fb_rvalue, false)
== GS_ERROR)
{
gimplify_ctxp->into_ssa = saved_into_ssa;
remove = true;
break;
}
gimplify_ctxp->into_ssa = saved_into_ssa;
v = TREE_OPERAND (decl, 1);
if (DECL_P (v))
{
omp_firstprivatize_variable (ctx, v);
omp_notice_variable (ctx, v, true);
}
decl = TREE_OPERAND (decl, 0);
}
if (TREE_CODE (decl) == ADDR_EXPR
|| TREE_CODE (decl) == INDIRECT_REF)
decl = TREE_OPERAND (decl, 0);
}
goto do_add_decl;
case OMP_CLAUSE_LINEAR:
if (gimplify_expr (&OMP_CLAUSE_LINEAR_STEP (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
else
{
if (code == OMP_SIMD
&& !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
{
struct gimplify_omp_ctx *octx = outer_ctx;
if (octx
&& octx->region_type == ORT_WORKSHARE
&& octx->combined_loop
&& !octx->distribute)
{
if (octx->outer_context
&& (octx->outer_context->region_type
== ORT_COMBINED_PARALLEL))
octx = octx->outer_context->outer_context;
else
octx = octx->outer_context;
}
if (octx
&& octx->region_type == ORT_WORKSHARE
&& octx->combined_loop
&& octx->distribute)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<linear%> clause for variable other than "
"loop iterator specified on construct "
"combined with %<distribute%>");
remove = true;
break;
}
}
/* For combined #pragma omp parallel for simd, need to put
lastprivate and perhaps firstprivate too on the
parallel. Similarly for #pragma omp for simd. */
struct gimplify_omp_ctx *octx = outer_ctx;
decl = NULL_TREE;
do
{
if (OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
break;
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
decl = NULL_TREE;
break;
}
flags = GOVD_SEEN;
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
flags |= GOVD_FIRSTPRIVATE;
if (!OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
flags |= GOVD_LASTPRIVATE;
if (octx
&& octx->region_type == ORT_WORKSHARE
&& octx->combined_loop)
{
if (octx->outer_context
&& (octx->outer_context->region_type
== ORT_COMBINED_PARALLEL))
octx = octx->outer_context;
else if (omp_check_private (octx, decl, false))
break;
}
else if (octx
&& (octx->region_type & ORT_TASK) != 0
&& octx->combined_loop)
;
else if (octx
&& octx->region_type == ORT_COMBINED_PARALLEL
&& ctx->region_type == ORT_WORKSHARE
&& octx == outer_ctx)
flags = GOVD_SEEN | GOVD_SHARED;
else if (octx
&& ((octx->region_type & ORT_COMBINED_TEAMS)
== ORT_COMBINED_TEAMS))
flags = GOVD_SEEN | GOVD_SHARED;
else if (octx
&& octx->region_type == ORT_COMBINED_TARGET)
{
flags &= ~GOVD_LASTPRIVATE;
if (flags == GOVD_SEEN)
break;
}
else
break;
splay_tree_node on
= splay_tree_lookup (octx->variables,
(splay_tree_key) decl);
if (on && (on->value & GOVD_DATA_SHARE_CLASS) != 0)
{
octx = NULL;
break;
}
omp_add_variable (octx, decl, flags);
if (octx->outer_context == NULL)
break;
octx = octx->outer_context;
}
while (1);
if (octx
&& decl
&& (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
|| !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
omp_notice_variable (octx, decl, true);
}
flags = GOVD_LINEAR | GOVD_EXPLICIT;
if (OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
{
notice_outer = false;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
goto do_add;
case OMP_CLAUSE_MAP:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
remove = true;
switch (code)
{
case OMP_TARGET:
break;
case OACC_DATA:
if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
break;
/* FALLTHRU */
case OMP_TARGET_DATA:
case OMP_TARGET_ENTER_DATA:
case OMP_TARGET_EXIT_DATA:
case OACC_ENTER_DATA:
case OACC_EXIT_DATA:
case OACC_HOST_DATA:
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE))
/* For target {,enter ,exit }data only the array slice is
mapped, but not the pointer to it. */
remove = true;
break;
default:
break;
}
/* For Fortran, not only the pointer to the data is mapped but also
the address of the pointer, the array descriptor etc.; for
'exit data' - and in particular for 'delete:' - having an 'alloc:'
does not make sense. Likewise, for 'update' only transferring the
data itself is needed as the rest has been handled in previous
directives. However, for 'exit data', the array descriptor needs
to be delete; hence, we turn the MAP_TO_PSET into a MAP_DELETE.
NOTE: Generally, it is not safe to perform "enter data" operations
on arrays where the data *or the descriptor* may go out of scope
before a corresponding "exit data" operation -- and such a
descriptor may be synthesized temporarily, e.g. to pass an
explicit-shape array to a function expecting an assumed-shape
argument. Performing "enter data" inside the called function
would thus be problematic. */
if (code == OMP_TARGET_EXIT_DATA
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_TO_PSET)
OMP_CLAUSE_SET_MAP_KIND (c, OMP_CLAUSE_MAP_KIND (*prev_list_p)
== GOMP_MAP_DELETE
? GOMP_MAP_DELETE : GOMP_MAP_RELEASE);
else if ((code == OMP_TARGET_EXIT_DATA || code == OMP_TARGET_UPDATE)
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_TO_PSET))
remove = true;
if (remove)
break;
if (DECL_P (decl) && outer_ctx && (region_type & ORT_ACC))
{
struct gimplify_omp_ctx *octx;
for (octx = outer_ctx; octx; octx = octx->outer_context)
{
if (octx->region_type != ORT_ACC_HOST_DATA)
break;
splay_tree_node n2
= splay_tree_lookup (octx->variables,
(splay_tree_key) decl);
if (n2)
error_at (OMP_CLAUSE_LOCATION (c), "variable %qE "
"declared in enclosing %<host_data%> region",
DECL_NAME (decl));
}
}
if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_P (decl) ? DECL_SIZE_UNIT (decl)
: TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (gimplify_expr (&OMP_CLAUSE_SIZE (c), pre_p,
NULL, is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
else if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE))
&& TREE_CODE (OMP_CLAUSE_SIZE (c)) != INTEGER_CST)
{
OMP_CLAUSE_SIZE (c)
= get_initialized_tmp_var (OMP_CLAUSE_SIZE (c), pre_p, NULL,
false);
omp_add_variable (ctx, OMP_CLAUSE_SIZE (c),
GOVD_FIRSTPRIVATE | GOVD_SEEN);
}
if (!DECL_P (decl))
{
tree d = decl, *pd;
if (TREE_CODE (d) == ARRAY_REF)
{
while (TREE_CODE (d) == ARRAY_REF)
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == COMPONENT_REF
&& TREE_CODE (TREE_TYPE (d)) == ARRAY_TYPE)
decl = d;
}
pd = &OMP_CLAUSE_DECL (c);
if (d == decl
&& TREE_CODE (decl) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE))
{
pd = &TREE_OPERAND (decl, 0);
decl = TREE_OPERAND (decl, 0);
}
bool indir_p = false;
tree orig_decl = decl;
tree decl_ref = NULL_TREE;
if ((region_type & ORT_ACC) != 0
&& TREE_CODE (*pd) == COMPONENT_REF
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH
&& code != OACC_UPDATE)
{
while (TREE_CODE (decl) == COMPONENT_REF)
{
decl = TREE_OPERAND (decl, 0);
if ((TREE_CODE (decl) == MEM_REF
&& integer_zerop (TREE_OPERAND (decl, 1)))
|| INDIRECT_REF_P (decl))
{
indir_p = true;
decl = TREE_OPERAND (decl, 0);
}
if (TREE_CODE (decl) == INDIRECT_REF
&& DECL_P (TREE_OPERAND (decl, 0))
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE))
{
decl_ref = decl;
decl = TREE_OPERAND (decl, 0);
}
}
}
else if (TREE_CODE (decl) == COMPONENT_REF)
{
while (TREE_CODE (decl) == COMPONENT_REF)
decl = TREE_OPERAND (decl, 0);
if (TREE_CODE (decl) == INDIRECT_REF
&& DECL_P (TREE_OPERAND (decl, 0))
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE))
decl = TREE_OPERAND (decl, 0);
}
if (decl != orig_decl && DECL_P (decl) && indir_p)
{
gomp_map_kind k = (code == OACC_EXIT_DATA) ? GOMP_MAP_DETACH
: GOMP_MAP_ATTACH;
/* We have a dereference of a struct member. Make this an
attach/detach operation, and ensure the base pointer is
mapped as a FIRSTPRIVATE_POINTER. */
OMP_CLAUSE_SET_MAP_KIND (c, k);
flags = GOVD_MAP | GOVD_SEEN | GOVD_EXPLICIT;
tree next_clause = OMP_CLAUSE_CHAIN (c);
if (k == GOMP_MAP_ATTACH
&& code != OACC_ENTER_DATA
&& (!next_clause
|| (OMP_CLAUSE_CODE (next_clause) != OMP_CLAUSE_MAP)
|| (OMP_CLAUSE_MAP_KIND (next_clause)
!= GOMP_MAP_POINTER)
|| OMP_CLAUSE_DECL (next_clause) != decl)
&& (!struct_deref_set
|| !struct_deref_set->contains (decl)))
{
if (!struct_deref_set)
struct_deref_set = new hash_set<tree> ();
/* As well as the attach, we also need a
FIRSTPRIVATE_POINTER clause to properly map the
pointer to the struct base. */
tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALLOC);
OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c2)
= 1;
tree charptr_zero
= build_int_cst (build_pointer_type (char_type_node),
0);
OMP_CLAUSE_DECL (c2)
= build2 (MEM_REF, char_type_node,
decl_ref ? decl_ref : decl, charptr_zero);
OMP_CLAUSE_SIZE (c2) = size_zero_node;
tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (c3,
GOMP_MAP_FIRSTPRIVATE_POINTER);
OMP_CLAUSE_DECL (c3) = decl;
OMP_CLAUSE_SIZE (c3) = size_zero_node;
tree mapgrp = *prev_list_p;
*prev_list_p = c2;
OMP_CLAUSE_CHAIN (c3) = mapgrp;
OMP_CLAUSE_CHAIN (c2) = c3;
struct_deref_set->add (decl);
}
goto do_add_decl;
}
/* An "attach/detach" operation on an update directive should
behave as a GOMP_MAP_ALWAYS_POINTER. Beware that
unlike attach or detach map kinds, GOMP_MAP_ALWAYS_POINTER
depends on the previous mapping. */
if (code == OACC_UPDATE
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_ALWAYS_POINTER);
if (gimplify_expr (pd, pre_p, NULL, is_gimple_lvalue, fb_lvalue)
== GS_ERROR)
{
remove = true;
break;
}
if (DECL_P (decl)
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_TO_PSET
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_DETACH
&& code != OACC_UPDATE)
{
if (error_operand_p (decl))
{
remove = true;
break;
}
tree stype = TREE_TYPE (decl);
if (TREE_CODE (stype) == REFERENCE_TYPE)
stype = TREE_TYPE (stype);
if (TYPE_SIZE_UNIT (stype) == NULL
|| TREE_CODE (TYPE_SIZE_UNIT (stype)) != INTEGER_CST)
{
error_at (OMP_CLAUSE_LOCATION (c),
"mapping field %qE of variable length "
"structure", OMP_CLAUSE_DECL (c));
remove = true;
break;
}
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)
{
/* Error recovery. */
if (prev_list_p == NULL)
{
remove = true;
break;
}
if (OMP_CLAUSE_CHAIN (*prev_list_p) != c)
{
tree ch = OMP_CLAUSE_CHAIN (*prev_list_p);
if (ch == NULL_TREE || OMP_CLAUSE_CHAIN (ch) != c)
{
remove = true;
break;
}
}
}
poly_offset_int offset1;
poly_int64 bitpos1;
tree base_ref;
tree base
= extract_base_bit_offset (OMP_CLAUSE_DECL (c), &base_ref,
&bitpos1, &offset1);
gcc_assert (base == decl);
splay_tree_node n
= splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
bool ptr = (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_ALWAYS_POINTER);
bool attach_detach = (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_ATTACH_DETACH);
bool attach = OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH;
bool has_attachments = false;
/* For OpenACC, pointers in structs should trigger an
attach action. */
if (attach_detach && (region_type & ORT_ACC) != 0)
{
/* Turn a GOMP_MAP_ATTACH_DETACH clause into a
GOMP_MAP_ATTACH or GOMP_MAP_DETACH clause after we
have detected a case that needs a GOMP_MAP_STRUCT
mapping added. */
gomp_map_kind k
= (code == OACC_EXIT_DATA) ? GOMP_MAP_DETACH
: GOMP_MAP_ATTACH;
OMP_CLAUSE_SET_MAP_KIND (c, k);
has_attachments = true;
}
if (n == NULL || (n->value & GOVD_MAP) == 0)
{
tree l = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
gomp_map_kind k = attach ? GOMP_MAP_FORCE_PRESENT
: GOMP_MAP_STRUCT;
OMP_CLAUSE_SET_MAP_KIND (l, k);
if (base_ref)
OMP_CLAUSE_DECL (l) = unshare_expr (base_ref);
else
OMP_CLAUSE_DECL (l) = decl;
OMP_CLAUSE_SIZE (l)
= (!attach
? size_int (1)
: DECL_P (OMP_CLAUSE_DECL (l))
? DECL_SIZE_UNIT (OMP_CLAUSE_DECL (l))
: TYPE_SIZE_UNIT (TREE_TYPE (OMP_CLAUSE_DECL (l))));
if (struct_map_to_clause == NULL)
struct_map_to_clause = new hash_map<tree, tree>;
struct_map_to_clause->put (decl, l);
if (ptr || attach_detach)
{
insert_struct_comp_map (code, c, l, *prev_list_p,
NULL);
*prev_list_p = l;
prev_list_p = NULL;
}
else
{
OMP_CLAUSE_CHAIN (l) = c;
*list_p = l;
list_p = &OMP_CLAUSE_CHAIN (l);
}
if (base_ref && code == OMP_TARGET)
{
tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
enum gomp_map_kind mkind
= GOMP_MAP_FIRSTPRIVATE_REFERENCE;
OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
OMP_CLAUSE_DECL (c2) = decl;
OMP_CLAUSE_SIZE (c2) = size_zero_node;
OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (l);
OMP_CLAUSE_CHAIN (l) = c2;
}
flags = GOVD_MAP | GOVD_EXPLICIT;
if (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c))
|| ptr
|| attach_detach)
flags |= GOVD_SEEN;
if (has_attachments)
flags |= GOVD_MAP_HAS_ATTACHMENTS;
goto do_add_decl;
}
else if (struct_map_to_clause)
{
tree *osc = struct_map_to_clause->get (decl);
tree *sc = NULL, *scp = NULL;
if (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c))
|| ptr
|| attach_detach)
n->value |= GOVD_SEEN;
sc = &OMP_CLAUSE_CHAIN (*osc);
if (*sc != c
&& (OMP_CLAUSE_MAP_KIND (*sc)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE))
sc = &OMP_CLAUSE_CHAIN (*sc);
/* Here "prev_list_p" is the end of the inserted
alloc/release nodes after the struct node, OSC. */
for (; *sc != c; sc = &OMP_CLAUSE_CHAIN (*sc))
if ((ptr || attach_detach) && sc == prev_list_p)
break;
else if (TREE_CODE (OMP_CLAUSE_DECL (*sc))
!= COMPONENT_REF
&& (TREE_CODE (OMP_CLAUSE_DECL (*sc))
!= INDIRECT_REF)
&& (TREE_CODE (OMP_CLAUSE_DECL (*sc))
!= ARRAY_REF))
break;
else
{
tree sc_decl = OMP_CLAUSE_DECL (*sc);
poly_offset_int offsetn;
poly_int64 bitposn;
tree base
= extract_base_bit_offset (sc_decl, NULL,
&bitposn, &offsetn);
if (base != decl)
break;
if (scp)
continue;
tree d1 = OMP_CLAUSE_DECL (*sc);
tree d2 = OMP_CLAUSE_DECL (c);
while (TREE_CODE (d1) == ARRAY_REF)
d1 = TREE_OPERAND (d1, 0);
while (TREE_CODE (d2) == ARRAY_REF)
d2 = TREE_OPERAND (d2, 0);
if (TREE_CODE (d1) == INDIRECT_REF)
d1 = TREE_OPERAND (d1, 0);
if (TREE_CODE (d2) == INDIRECT_REF)
d2 = TREE_OPERAND (d2, 0);
while (TREE_CODE (d1) == COMPONENT_REF)
if (TREE_CODE (d2) == COMPONENT_REF
&& TREE_OPERAND (d1, 1)
== TREE_OPERAND (d2, 1))
{
d1 = TREE_OPERAND (d1, 0);
d2 = TREE_OPERAND (d2, 0);
}
else
break;
if (d1 == d2)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in map "
"clauses", OMP_CLAUSE_DECL (c));
remove = true;
break;
}
if (maybe_lt (offset1, offsetn)
|| (known_eq (offset1, offsetn)
&& maybe_lt (bitpos1, bitposn)))
{
if (ptr || attach_detach)
scp = sc;
else
break;
}
}
if (remove)
break;
if (!attach)
OMP_CLAUSE_SIZE (*osc)
= size_binop (PLUS_EXPR, OMP_CLAUSE_SIZE (*osc),
size_one_node);
if (ptr || attach_detach)
{
tree cl = insert_struct_comp_map (code, c, NULL,
*prev_list_p, scp);
if (sc == prev_list_p)
{
*sc = cl;
prev_list_p = NULL;
}
else
{
*prev_list_p = OMP_CLAUSE_CHAIN (c);
list_p = prev_list_p;
prev_list_p = NULL;
OMP_CLAUSE_CHAIN (c) = *sc;
*sc = cl;
continue;
}
}
else if (*sc != c)
{
*list_p = OMP_CLAUSE_CHAIN (c);
OMP_CLAUSE_CHAIN (c) = *sc;
*sc = c;
continue;
}
}
}
if (!remove
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_POINTER
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH_DETACH
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_TO_PSET
&& OMP_CLAUSE_CHAIN (c)
&& OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (c)) == OMP_CLAUSE_MAP
&& ((OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_ALWAYS_POINTER)
|| (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_ATTACH_DETACH)
|| (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_TO_PSET)))
prev_list_p = list_p;
break;
}
flags = GOVD_MAP | GOVD_EXPLICIT;
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TO
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TOFROM)
flags |= GOVD_MAP_ALWAYS_TO;
goto do_add;
case OMP_CLAUSE_DEPEND:
if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
{
tree deps = OMP_CLAUSE_DECL (c);
while (deps && TREE_CODE (deps) == TREE_LIST)
{
if (TREE_CODE (TREE_PURPOSE (deps)) == TRUNC_DIV_EXPR
&& DECL_P (TREE_OPERAND (TREE_PURPOSE (deps), 1)))
gimplify_expr (&TREE_OPERAND (TREE_PURPOSE (deps), 1),
pre_p, NULL, is_gimple_val, fb_rvalue);
deps = TREE_CHAIN (deps);
}
break;
}
else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
break;
if (handled_depend_iterators == -1)
handled_depend_iterators = gimplify_omp_depend (list_p, pre_p);
if (handled_depend_iterators)
{
if (handled_depend_iterators == 2)
remove = true;
break;
}
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPOUND_EXPR)
{
gimplify_expr (&TREE_OPERAND (OMP_CLAUSE_DECL (c), 0), pre_p,
NULL, is_gimple_val, fb_rvalue);
OMP_CLAUSE_DECL (c) = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
}
if (error_operand_p (OMP_CLAUSE_DECL (c)))
{
remove = true;
break;
}
OMP_CLAUSE_DECL (c) = build_fold_addr_expr (OMP_CLAUSE_DECL (c));
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
if (code == OMP_TASK)
ctx->has_depend = true;
break;
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE__CACHE_:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_P (decl) ? DECL_SIZE_UNIT (decl)
: TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (gimplify_expr (&OMP_CLAUSE_SIZE (c), pre_p,
NULL, is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
if (!DECL_P (decl))
{
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p,
NULL, is_gimple_lvalue, fb_lvalue)
== GS_ERROR)
{
remove = true;
break;
}
break;
}
goto do_notice;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
flags = GOVD_EXPLICIT;
goto do_add;
case OMP_CLAUSE_IS_DEVICE_PTR:
flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
goto do_add;
do_add:
decl = OMP_CLAUSE_DECL (c);
do_add_decl:
if (error_operand_p (decl))
{
remove = true;
break;
}
if (DECL_NAME (decl) == NULL_TREE && (flags & GOVD_SHARED) == 0)
{
tree t = omp_member_access_dummy_var (decl);
if (t)
{
tree v = DECL_VALUE_EXPR (decl);
DECL_NAME (decl) = DECL_NAME (TREE_OPERAND (v, 1));
if (outer_ctx)
omp_notice_variable (outer_ctx, t, true);
}
}
if (code == OACC_DATA
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
flags |= GOVD_MAP_0LEN_ARRAY;
omp_add_variable (ctx, decl, flags);
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
GOVD_LOCAL | GOVD_SEEN);
if (OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
&& walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c),
find_decl_expr,
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c),
NULL) == NULL_TREE)
omp_add_variable (ctx,
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c),
GOVD_LOCAL | GOVD_SEEN);
gimplify_omp_ctxp = ctx;
push_gimplify_context ();
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c)));
push_gimplify_context ();
gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c)));
OMP_CLAUSE_REDUCTION_INIT (c) = NULL_TREE;
OMP_CLAUSE_REDUCTION_MERGE (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_STMT (c))
{
gimplify_omp_ctxp = ctx;
push_gimplify_context ();
if (TREE_CODE (OMP_CLAUSE_LASTPRIVATE_STMT (c)) != BIND_EXPR)
{
tree bind = build3 (BIND_EXPR, void_type_node, NULL,
NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = OMP_CLAUSE_LASTPRIVATE_STMT (c);
OMP_CLAUSE_LASTPRIVATE_STMT (c) = bind;
}
gimplify_and_add (OMP_CLAUSE_LASTPRIVATE_STMT (c),
&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)));
OMP_CLAUSE_LASTPRIVATE_STMT (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_STMT (c))
{
gimplify_omp_ctxp = ctx;
push_gimplify_context ();
if (TREE_CODE (OMP_CLAUSE_LINEAR_STMT (c)) != BIND_EXPR)
{
tree bind = build3 (BIND_EXPR, void_type_node, NULL,
NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = OMP_CLAUSE_LINEAR_STMT (c);
OMP_CLAUSE_LINEAR_STMT (c) = bind;
}
gimplify_and_add (OMP_CLAUSE_LINEAR_STMT (c),
&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c)));
OMP_CLAUSE_LINEAR_STMT (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
if (notice_outer)
goto do_notice;
break;
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_COPYPRIVATE
&& !remove
&& !omp_check_private (ctx, decl, true))
{
remove = true;
if (is_global_var (decl))
{
if (DECL_THREAD_LOCAL_P (decl))
remove = false;
else if (DECL_HAS_VALUE_EXPR_P (decl))
{
tree value = get_base_address (DECL_VALUE_EXPR (decl));
if (value
&& DECL_P (value)
&& DECL_THREAD_LOCAL_P (value))
remove = false;
}
}
if (remove)
error_at (OMP_CLAUSE_LOCATION (c),
"copyprivate variable %qE is not threadprivate"
" or private in outer context", DECL_NAME (decl));
}
do_notice:
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
&& outer_ctx
&& ((region_type & ORT_TASKLOOP) == ORT_TASKLOOP
|| (region_type == ORT_WORKSHARE
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& (OMP_CLAUSE_REDUCTION_INSCAN (c)
|| code == OMP_LOOP)))
&& (outer_ctx->region_type == ORT_COMBINED_PARALLEL
|| (code == OMP_LOOP
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& ((outer_ctx->region_type & ORT_COMBINED_TEAMS)
== ORT_COMBINED_TEAMS))))
{
splay_tree_node on
= splay_tree_lookup (outer_ctx->variables,
(splay_tree_key)decl);
if (on == NULL || (on->value & GOVD_DATA_SHARE_CLASS) == 0)
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
&& (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
|| (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
&& (TREE_CODE (TREE_TYPE (TREE_TYPE (decl)))
== POINTER_TYPE))))
omp_firstprivatize_variable (outer_ctx, decl);
else
{
omp_add_variable (outer_ctx, decl,
GOVD_SEEN | GOVD_SHARED);
if (outer_ctx->outer_context)
omp_notice_variable (outer_ctx->outer_context, decl,
true);
}
}
}
if (outer_ctx)
omp_notice_variable (outer_ctx, decl, true);
if (check_non_private
&& region_type == ORT_WORKSHARE
&& (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
|| decl == OMP_CLAUSE_DECL (c)
|| (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
&& (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
== ADDR_EXPR
|| (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
== POINTER_PLUS_EXPR
&& (TREE_CODE (TREE_OPERAND (TREE_OPERAND
(OMP_CLAUSE_DECL (c), 0), 0))
== ADDR_EXPR)))))
&& omp_check_private (ctx, decl, false))
{
error ("%s variable %qE is private in outer context",
check_non_private, DECL_NAME (decl));
remove = true;
}
break;
case OMP_CLAUSE_IF:
if (OMP_CLAUSE_IF_MODIFIER (c) != ERROR_MARK
&& OMP_CLAUSE_IF_MODIFIER (c) != code)
{
const char *p[2];
for (int i = 0; i < 2; i++)
switch (i ? OMP_CLAUSE_IF_MODIFIER (c) : code)
{
case VOID_CST: p[i] = "cancel"; break;
case OMP_PARALLEL: p[i] = "parallel"; break;
case OMP_SIMD: p[i] = "simd"; break;
case OMP_TASK: p[i] = "task"; break;
case OMP_TASKLOOP: p[i] = "taskloop"; break;
case OMP_TARGET_DATA: p[i] = "target data"; break;
case OMP_TARGET: p[i] = "target"; break;
case OMP_TARGET_UPDATE: p[i] = "target update"; break;
case OMP_TARGET_ENTER_DATA:
p[i] = "target enter data"; break;
case OMP_TARGET_EXIT_DATA: p[i] = "target exit data"; break;
default: gcc_unreachable ();
}
error_at (OMP_CLAUSE_LOCATION (c),
"expected %qs %<if%> clause modifier rather than %qs",
p[0], p[1]);
remove = true;
}
/* Fall through. */
case OMP_CLAUSE_FINAL:
OMP_CLAUSE_OPERAND (c, 0)
= gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
/* Fall through. */
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_HINT:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
remove = true;
break;
case OMP_CLAUSE_GANG:
if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
remove = true;
if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 1), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
remove = true;
break;
case OMP_CLAUSE_NOWAIT:
nowait = 1;
break;
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_BIND:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
break;
case OMP_CLAUSE_ORDER:
ctx->order_concurrent = true;
break;
case OMP_CLAUSE_DEFAULTMAP:
enum gimplify_defaultmap_kind gdmkmin, gdmkmax;
switch (OMP_CLAUSE_DEFAULTMAP_CATEGORY (c))
{
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED:
gdmkmin = GDMK_SCALAR;
gdmkmax = GDMK_POINTER;
break;
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_SCALAR:
gdmkmin = gdmkmax = GDMK_SCALAR;
break;
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_AGGREGATE:
gdmkmin = gdmkmax = GDMK_AGGREGATE;
break;
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_ALLOCATABLE:
gdmkmin = gdmkmax = GDMK_ALLOCATABLE;
break;
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_POINTER:
gdmkmin = gdmkmax = GDMK_POINTER;
break;
default:
gcc_unreachable ();
}
for (int gdmk = gdmkmin; gdmk <= gdmkmax; gdmk++)
switch (OMP_CLAUSE_DEFAULTMAP_BEHAVIOR (c))
{
case OMP_CLAUSE_DEFAULTMAP_ALLOC:
ctx->defaultmap[gdmk] = GOVD_MAP | GOVD_MAP_ALLOC_ONLY;
break;
case OMP_CLAUSE_DEFAULTMAP_TO:
ctx->defaultmap[gdmk] = GOVD_MAP | GOVD_MAP_TO_ONLY;
break;
case OMP_CLAUSE_DEFAULTMAP_FROM:
ctx->defaultmap[gdmk] = GOVD_MAP | GOVD_MAP_FROM_ONLY;
break;
case OMP_CLAUSE_DEFAULTMAP_TOFROM:
ctx->defaultmap[gdmk] = GOVD_MAP;
break;
case OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE:
ctx->defaultmap[gdmk] = GOVD_FIRSTPRIVATE;
break;
case OMP_CLAUSE_DEFAULTMAP_NONE:
ctx->defaultmap[gdmk] = 0;
break;
case OMP_CLAUSE_DEFAULTMAP_DEFAULT:
switch (gdmk)
{
case GDMK_SCALAR:
ctx->defaultmap[gdmk] = GOVD_FIRSTPRIVATE;
break;
case GDMK_AGGREGATE:
case GDMK_ALLOCATABLE:
ctx->defaultmap[gdmk] = GOVD_MAP;
break;
case GDMK_POINTER:
ctx->defaultmap[gdmk] = GOVD_MAP | GOVD_MAP_0LEN_ARRAY;
break;
default:
gcc_unreachable ();
}
break;
default:
gcc_unreachable ();
}
break;
case OMP_CLAUSE_ALIGNED:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (gimplify_expr (&OMP_CLAUSE_ALIGNED_ALIGNMENT (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
if (!is_global_var (decl)
&& TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
omp_add_variable (ctx, decl, GOVD_ALIGNED);
break;
case OMP_CLAUSE_NONTEMPORAL:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
omp_add_variable (ctx, decl, GOVD_NONTEMPORAL);
break;
case OMP_CLAUSE_DEFAULT:
ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
break;
case OMP_CLAUSE_INCLUSIVE:
case OMP_CLAUSE_EXCLUSIVE:
decl = OMP_CLAUSE_DECL (c);
{
splay_tree_node n = splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl);
if (n == NULL || (n->value & GOVD_REDUCTION) == 0)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD specified in %qs clause but not in %<inscan%> "
"%<reduction%> clause on the containing construct",
decl, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else
{
n->value |= GOVD_REDUCTION_INSCAN;
if (outer_ctx->region_type == ORT_SIMD
&& outer_ctx->outer_context
&& outer_ctx->outer_context->region_type == ORT_WORKSHARE)
{
n = splay_tree_lookup (outer_ctx->outer_context->variables,
(splay_tree_key) decl);
if (n && (n->value & GOVD_REDUCTION) != 0)
n->value |= GOVD_REDUCTION_INSCAN;
}
}
}
break;
default:
gcc_unreachable ();
}
if (code == OACC_DATA
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
remove = true;
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
ctx->clauses = *orig_list_p;
gimplify_omp_ctxp = ctx;
if (struct_map_to_clause)
delete struct_map_to_clause;
if (struct_deref_set)
delete struct_deref_set;
}
/* Return true if DECL is a candidate for shared to firstprivate
optimization. We only consider non-addressable scalars, not
too big, and not references. */
/* Predicate: may DECL's shared mapping be optimized into firstprivate?
   True only for small, non-addressable, non-reference scalars.  */
static bool
omp_shared_to_firstprivate_optimizable_decl_p (tree decl)
{
  /* Address-taken variables can be reached through pointers, so a
     private copy could miss aliased accesses.  */
  if (TREE_ADDRESSABLE (decl))
    return false;

  tree type = TREE_TYPE (decl);
  bool scalar_like = (is_gimple_reg_type (type)
		      && TREE_CODE (type) != REFERENCE_TYPE
		      && !TREE_ADDRESSABLE (type));
  if (!scalar_like)
    return false;

  /* Don't optimize too large decls, as each thread/task will have
     its own copy.  */
  HOST_WIDE_INT size = int_size_in_bytes (type);
  if (size == -1 || size > 4 * POINTER_SIZE / BITS_PER_UNIT)
    return false;

  return !lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Helper function of omp_find_stores_op and gimplify_adjust_omp_clauses*.
For omp_shared_to_firstprivate_optimizable_decl_p decl mark it as
GOVD_WRITTEN in outer contexts. */
static void
omp_mark_stores (struct gimplify_omp_ctx *ctx, tree decl)
{
for (; ctx; ctx = ctx->outer_context)
{
splay_tree_node n = splay_tree_lookup (ctx->variables,
(splay_tree_key) decl);
if (n == NULL)
continue;
else if (n->value & GOVD_SHARED)
{
n->value |= GOVD_WRITTEN;
return;
}
else if (n->value & GOVD_DATA_SHARE_CLASS)
return;
}
}
/* Helper callback for walk_gimple_seq to discover possible stores
to omp_shared_to_firstprivate_optimizable_decl_p decls and set
GOVD_WRITTEN if they are GOVD_SHARED in some outer context
for those. */
/* Operand callback for walk_gimple_seq: for an LHS operand, strip
   component/MEM_REF wrappers down to the base DECL and, when that DECL
   is a shared-to-firstprivate candidate, mark it as stored to.  */
static tree
omp_find_stores_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  *walk_subtrees = 0;
  /* Only writes are interesting.  */
  if (!wi->is_lhs)
    return NULL_TREE;

  /* Peel handled components and &object MEM_REFs until the base
     object is exposed.  */
  tree expr = *tp;
  while (true)
    {
      if (handled_component_p (expr))
	expr = TREE_OPERAND (expr, 0);
      else if ((TREE_CODE (expr) == MEM_REF
		|| TREE_CODE (expr) == TARGET_MEM_REF)
	       && TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	expr = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
      else
	break;
    }

  if (DECL_P (expr) && omp_shared_to_firstprivate_optimizable_decl_p (expr))
    omp_mark_stores (gimplify_omp_ctxp, expr);
  return NULL_TREE;
}
/* Statement-level companion of omp_find_stores_op for walk_gimple_seq:
   prunes OpenMP constructs whose bodies gimplify_adjust_omp_clauses
   already handled (still walking a GIMPLE_OMP_FOR pre-body), so that
   only relevant statements reach the operand callback. */
/* Statement callback for walk_gimple_seq driving omp_find_stores_op.  */
static tree
omp_find_stores_stmt (gimple_stmt_iterator *gsi_p,
		      bool *handled_ops_p,
		      struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);
  enum gimple_code gcode = gimple_code (stmt);

  if (gcode == GIMPLE_OMP_FOR)
    {
      /* Don't recurse into the loop body — gimplify_adjust_omp_clauses
	 already handled it — but do scan the pre-body.  */
      *handled_ops_p = true;
      gimple_seq pre_body = gimple_omp_for_pre_body (stmt);
      if (pre_body)
	walk_gimple_seq (pre_body, omp_find_stores_stmt,
			 omp_find_stores_op, wi);
    }
  else if (gcode == GIMPLE_OMP_PARALLEL
	   || gcode == GIMPLE_OMP_TASK
	   || gcode == GIMPLE_OMP_SECTIONS
	   || gcode == GIMPLE_OMP_SINGLE
	   || gcode == GIMPLE_OMP_TARGET
	   || gcode == GIMPLE_OMP_TEAMS
	   || gcode == GIMPLE_OMP_CRITICAL)
    /* Bodies of these constructs were already processed; skip them.  */
    *handled_ops_p = true;

  return NULL_TREE;
}
/* State threaded through the splay-tree traversal in
   gimplify_adjust_omp_clauses to gimplify_adjust_omp_clauses_1.  */
struct gimplify_adjust_omp_clauses_data
{
  /* Head of the clause chain to which implicit clauses are prepended.  */
  tree *list_p;
  /* Statement sequence receiving any gimplified side effects.  */
  gimple_seq *pre_p;
};
/* For all variables that were not actually used within the context,
remove PRIVATE, SHARED, and FIRSTPRIVATE clauses. */
static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
  /* N maps a DECL (key) to the GOVD_* flags (value) recorded during
     gimplification; DATA carries the clause list and pre-statement
     sequence being built up.  Returns 0 so the splay-tree walk
     continues.  */
  tree *list_p = ((struct gimplify_adjust_omp_clauses_data *) data)->list_p;
  gimple_seq *pre_p
    = ((struct gimplify_adjust_omp_clauses_data *) data)->pre_p;
  tree decl = (tree) n->key;
  unsigned flags = n->value;
  enum omp_clause_code code;
  tree clause;
  bool private_debug;
  if (gimplify_omp_ctxp->region_type == ORT_COMBINED_PARALLEL
      && (flags & GOVD_LASTPRIVATE_CONDITIONAL) != 0)
    flags = GOVD_SHARED | GOVD_SEEN | GOVD_WRITTEN;
  /* Nothing to add for explicitly-specified, context-local, unseen,
     or attachment-only entries.  */
  if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
    return 0;
  if ((flags & GOVD_SEEN) == 0)
    return 0;
  if ((flags & GOVD_MAP_HAS_ATTACHMENTS) != 0)
    return 0;
  if (flags & GOVD_DEBUG_PRIVATE)
    {
      gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_SHARED);
      private_debug = true;
    }
  else if (flags & GOVD_MAP)
    private_debug = false;
  else
    private_debug
      = lang_hooks.decls.omp_private_debug_clause (decl,
						   !!(flags & GOVD_SHARED));
  /* Pick the implicit clause code from the recorded data-sharing
     class.  */
  if (private_debug)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_MAP)
    {
      code = OMP_CLAUSE_MAP;
      if ((gimplify_omp_ctxp->region_type & ORT_ACC) == 0
	  && TYPE_ATOMIC (strip_array_types (TREE_TYPE (decl))))
	{
	  error ("%<_Atomic%> %qD in implicit %<map%> clause", decl);
	  return 0;
	}
      if (VAR_P (decl)
	  && DECL_IN_CONSTANT_POOL (decl)
	  && !lookup_attribute ("omp declare target",
				DECL_ATTRIBUTES (decl)))
	{
	  /* A mapped constant-pool variable is marked "omp declare
	     target" and its varpool node flagged offloadable.  */
	  tree id = get_identifier ("omp declare target");
	  DECL_ATTRIBUTES (decl)
	    = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (decl));
	  varpool_node *node = varpool_node::get (decl);
	  if (node)
	    {
	      node->offloadable = 1;
	      if (ENABLE_OFFLOADING)
		g->have_offload = true;
	    }
	}
    }
  else if (flags & GOVD_SHARED)
    {
      if (is_global_var (decl))
	{
	  /* For a global, only emit a clause when some enclosing
	     context privatizes/maps it; otherwise skip.  */
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
	  while (ctx != NULL)
	    {
	      splay_tree_node on
		= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	      if (on && (on->value & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
				      | GOVD_PRIVATE | GOVD_REDUCTION
				      | GOVD_LINEAR | GOVD_MAP)) != 0)
		break;
	      ctx = ctx->outer_context;
	    }
	  if (ctx == NULL)
	    return 0;
	}
      code = OMP_CLAUSE_SHARED;
      /* Don't optimize shared into firstprivate for read-only vars
	 on tasks with depend clause, we shouldn't try to copy them
	 until the dependencies are satisfied.  */
      if (gimplify_omp_ctxp->has_depend)
	flags |= GOVD_WRITTEN;
    }
  else if (flags & GOVD_PRIVATE)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_FIRSTPRIVATE)
    {
      code = OMP_CLAUSE_FIRSTPRIVATE;
      if ((gimplify_omp_ctxp->region_type & ORT_TARGET)
	  && (gimplify_omp_ctxp->region_type & ORT_ACC) == 0
	  && TYPE_ATOMIC (strip_array_types (TREE_TYPE (decl))))
	{
	  error ("%<_Atomic%> %qD in implicit %<firstprivate%> clause on "
		 "%<target%> construct", decl);
	  return 0;
	}
    }
  else if (flags & GOVD_LASTPRIVATE)
    code = OMP_CLAUSE_LASTPRIVATE;
  else if (flags & (GOVD_ALIGNED | GOVD_NONTEMPORAL))
    return 0;
  else if (flags & GOVD_CONDTEMP)
    {
      code = OMP_CLAUSE__CONDTEMP_;
      gimple_add_tmp_var (decl);
    }
  else
    gcc_unreachable ();
  /* Lastprivate / written-shared decls are stores as far as outer
     contexts are concerned.  */
  if (((flags & GOVD_LASTPRIVATE)
       || (code == OMP_CLAUSE_SHARED && (flags & GOVD_WRITTEN)))
      && omp_shared_to_firstprivate_optimizable_decl_p (decl))
    omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
  tree chain = *list_p;
  clause = build_omp_clause (input_location, code);
  OMP_CLAUSE_DECL (clause) = decl;
  OMP_CLAUSE_CHAIN (clause) = chain;
  if (private_debug)
    OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
  else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
    OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
  else if (code == OMP_CLAUSE_SHARED
	   && (flags & GOVD_WRITTEN) == 0
	   && omp_shared_to_firstprivate_optimizable_decl_p (decl))
    OMP_CLAUSE_SHARED_READONLY (clause) = 1;
  else if (code == OMP_CLAUSE_FIRSTPRIVATE && (flags & GOVD_EXPLICIT) == 0)
    OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (clause) = 1;
  else if (code == OMP_CLAUSE_MAP && (flags & GOVD_MAP_0LEN_ARRAY) != 0)
    {
      /* Zero-length array section: emit a GOMP_MAP_ALLOC of *decl with
	 size 0 plus a GOMP_MAP_FIRSTPRIVATE_POINTER for decl itself.  */
      tree nc = build_omp_clause (input_location, OMP_CLAUSE_MAP);
      OMP_CLAUSE_DECL (nc) = decl;
      if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
	  && TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == POINTER_TYPE)
	OMP_CLAUSE_DECL (clause)
	  = build_simple_mem_ref_loc (input_location, decl);
      OMP_CLAUSE_DECL (clause)
	= build2 (MEM_REF, char_type_node, OMP_CLAUSE_DECL (clause),
		  build_int_cst (build_pointer_type (char_type_node), 0));
      OMP_CLAUSE_SIZE (clause) = size_zero_node;
      OMP_CLAUSE_SIZE (nc) = size_zero_node;
      OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_ALLOC);
      OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (clause) = 1;
      OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_POINTER);
      OMP_CLAUSE_CHAIN (nc) = chain;
      OMP_CLAUSE_CHAIN (clause) = nc;
      /* Gimplify the base address in the outer context.  */
      struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
      gimplify_omp_ctxp = ctx->outer_context;
      gimplify_expr (&TREE_OPERAND (OMP_CLAUSE_DECL (clause), 0),
		     pre_p, NULL, is_gimple_val, fb_rvalue);
      gimplify_omp_ctxp = ctx;
    }
  else if (code == OMP_CLAUSE_MAP)
    {
      int kind;
      /* Not all combinations of these GOVD_MAP flags are actually valid.  */
      switch (flags & (GOVD_MAP_TO_ONLY
		       | GOVD_MAP_FORCE
		       | GOVD_MAP_FORCE_PRESENT
		       | GOVD_MAP_ALLOC_ONLY
		       | GOVD_MAP_FROM_ONLY))
	{
	case 0:
	  kind = GOMP_MAP_TOFROM;
	  break;
	case GOVD_MAP_FORCE:
	  kind = GOMP_MAP_TOFROM | GOMP_MAP_FLAG_FORCE;
	  break;
	case GOVD_MAP_TO_ONLY:
	  kind = GOMP_MAP_TO;
	  break;
	case GOVD_MAP_FROM_ONLY:
	  kind = GOMP_MAP_FROM;
	  break;
	case GOVD_MAP_ALLOC_ONLY:
	  kind = GOMP_MAP_ALLOC;
	  break;
	case GOVD_MAP_TO_ONLY | GOVD_MAP_FORCE:
	  kind = GOMP_MAP_TO | GOMP_MAP_FLAG_FORCE;
	  break;
	case GOVD_MAP_FORCE_PRESENT:
	  kind = GOMP_MAP_FORCE_PRESENT;
	  break;
	default:
	  gcc_unreachable ();
	}
      OMP_CLAUSE_SET_MAP_KIND (clause, kind);
      if (DECL_SIZE (decl)
	  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	{
	  /* Variable-sized decl: map *base from its DECL_VALUE_EXPR and
	     chain a (FIRSTPRIVATE_)POINTER clause for the decl.  */
	  tree decl2 = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
	  decl2 = TREE_OPERAND (decl2, 0);
	  gcc_assert (DECL_P (decl2));
	  tree mem = build_simple_mem_ref (decl2);
	  OMP_CLAUSE_DECL (clause) = mem;
	  OMP_CLAUSE_SIZE (clause) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
	  if (gimplify_omp_ctxp->outer_context)
	    {
	      struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
	      omp_notice_variable (ctx, decl2, true);
	      omp_notice_variable (ctx, OMP_CLAUSE_SIZE (clause), true);
	    }
	  tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (clause),
				      OMP_CLAUSE_MAP);
	  OMP_CLAUSE_DECL (nc) = decl;
	  OMP_CLAUSE_SIZE (nc) = size_zero_node;
	  if (gimplify_omp_ctxp->target_firstprivatize_array_bases)
	    OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_POINTER);
	  else
	    OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_POINTER);
	  OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (clause);
	  OMP_CLAUSE_CHAIN (clause) = nc;
	}
      else if (gimplify_omp_ctxp->target_firstprivatize_array_bases
	       && lang_hooks.decls.omp_privatize_by_reference (decl))
	{
	  /* Reference-typed decl: map the referenced object and chain a
	     GOMP_MAP_FIRSTPRIVATE_REFERENCE for the reference itself.  */
	  OMP_CLAUSE_DECL (clause) = build_simple_mem_ref (decl);
	  OMP_CLAUSE_SIZE (clause)
	    = unshare_expr (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))));
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  gimplify_omp_ctxp = ctx->outer_context;
	  gimplify_expr (&OMP_CLAUSE_SIZE (clause),
			 pre_p, NULL, is_gimple_val, fb_rvalue);
	  gimplify_omp_ctxp = ctx;
	  tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (clause),
				      OMP_CLAUSE_MAP);
	  OMP_CLAUSE_DECL (nc) = decl;
	  OMP_CLAUSE_SIZE (nc) = size_zero_node;
	  OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_REFERENCE);
	  OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (clause);
	  OMP_CLAUSE_CHAIN (clause) = nc;
	}
      else
	OMP_CLAUSE_SIZE (clause) = DECL_SIZE_UNIT (decl);
    }
  if (code == OMP_CLAUSE_FIRSTPRIVATE && (flags & GOVD_LASTPRIVATE) != 0)
    {
      /* Decl is both firstprivate and lastprivate; chain a lastprivate
	 clause with its firstprivate flag set.  */
      tree nc = build_omp_clause (input_location, OMP_CLAUSE_LASTPRIVATE);
      OMP_CLAUSE_DECL (nc) = decl;
      OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (nc) = 1;
      OMP_CLAUSE_CHAIN (nc) = chain;
      OMP_CLAUSE_CHAIN (clause) = nc;
      struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
      gimplify_omp_ctxp = ctx->outer_context;
      lang_hooks.decls.omp_finish_clause (nc, pre_p);
      gimplify_omp_ctxp = ctx;
    }
  *list_p = clause;
  /* Let the language hook finish the new clause(s) in the outer
     context, then notice any DECLs used as map sizes.  */
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  gimplify_omp_ctxp = ctx->outer_context;
  lang_hooks.decls.omp_finish_clause (clause, pre_p);
  if (gimplify_omp_ctxp)
    for (; clause != chain; clause = OMP_CLAUSE_CHAIN (clause))
      if (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP
	  && DECL_P (OMP_CLAUSE_SIZE (clause)))
	omp_notice_variable (gimplify_omp_ctxp, OMP_CLAUSE_SIZE (clause),
			     true);
  gimplify_omp_ctxp = ctx;
  return 0;
}
static void
gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
enum tree_code code)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
tree *orig_list_p = list_p;
tree c, decl;
bool has_inscan_reductions = false;
if (body)
{
struct gimplify_omp_ctx *octx;
for (octx = ctx; octx; octx = octx->outer_context)
if ((octx->region_type & (ORT_PARALLEL | ORT_TASK | ORT_TEAMS)) != 0)
break;
if (octx)
{
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
walk_gimple_seq (body, omp_find_stores_stmt,
omp_find_stores_op, &wi);
}
}
if (ctx->add_safelen1)
{
/* If there are VLAs in the body of simd loop, prevent
vectorization. */
gcc_assert (ctx->region_type == ORT_SIMD);
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
OMP_CLAUSE_SAFELEN_EXPR (c) = integer_one_node;
OMP_CLAUSE_CHAIN (c) = *list_p;
*list_p = c;
list_p = &OMP_CLAUSE_CHAIN (c);
}
if (ctx->region_type == ORT_WORKSHARE
&& ctx->outer_context
&& ctx->outer_context->region_type == ORT_COMBINED_PARALLEL)
{
for (c = ctx->outer_context->clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
{
decl = OMP_CLAUSE_DECL (c);
splay_tree_node n
= splay_tree_lookup (ctx->outer_context->variables,
(splay_tree_key) decl);
gcc_checking_assert (!splay_tree_lookup (ctx->variables,
(splay_tree_key) decl));
omp_add_variable (ctx, decl, n->value);
tree c2 = copy_node (c);
OMP_CLAUSE_CHAIN (c2) = *list_p;
*list_p = c2;
if ((n->value & GOVD_FIRSTPRIVATE) == 0)
continue;
c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c2) = decl;
OMP_CLAUSE_CHAIN (c2) = *list_p;
*list_p = c2;
}
}
while ((c = *list_p) != NULL)
{
splay_tree_node n;
bool remove = false;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_FIRSTPRIVATE:
if ((ctx->region_type & ORT_TARGET)
&& (ctx->region_type & ORT_ACC) == 0
&& TYPE_ATOMIC (strip_array_types
(TREE_TYPE (OMP_CLAUSE_DECL (c)))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qD in %<firstprivate%> clause on "
"%<target%> construct", OMP_CLAUSE_DECL (c));
remove = true;
break;
}
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_LINEAR:
decl = OMP_CLAUSE_DECL (c);
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
remove = !(n->value & GOVD_SEEN);
if ((n->value & GOVD_LASTPRIVATE_CONDITIONAL) != 0
&& code == OMP_PARALLEL
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
remove = true;
if (! remove)
{
bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
if ((n->value & GOVD_DEBUG_PRIVATE)
|| lang_hooks.decls.omp_private_debug_clause (decl, shared))
{
gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
|| ((n->value & GOVD_DATA_SHARE_CLASS)
== GOVD_SHARED));
OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& ctx->has_depend
&& DECL_P (decl))
n->value |= GOVD_WRITTEN;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& (n->value & GOVD_WRITTEN) == 0
&& DECL_P (decl)
&& omp_shared_to_firstprivate_optimizable_decl_p (decl))
OMP_CLAUSE_SHARED_READONLY (c) = 1;
else if (DECL_P (decl)
&& ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& (n->value & GOVD_WRITTEN) != 0)
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
&& omp_shared_to_firstprivate_optimizable_decl_p (decl))
omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
}
break;
case OMP_CLAUSE_LASTPRIVATE:
/* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
accurately reflect the presence of a FIRSTPRIVATE clause. */
decl = OMP_CLAUSE_DECL (c);
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
= (n->value & GOVD_FIRSTPRIVATE) != 0;
if (code == OMP_DISTRIBUTE
&& OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
{
remove = true;
error_at (OMP_CLAUSE_LOCATION (c),
"same variable used in %<firstprivate%> and "
"%<lastprivate%> clauses on %<distribute%> "
"construct");
}
if (!remove
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& DECL_P (decl)
&& omp_shared_to_firstprivate_optimizable_decl_p (decl))
omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) && code == OMP_PARALLEL)
remove = true;
break;
case OMP_CLAUSE_ALIGNED:
decl = OMP_CLAUSE_DECL (c);
if (!is_global_var (decl))
{
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
remove = n == NULL || !(n->value & GOVD_SEEN);
if (!remove && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
{
struct gimplify_omp_ctx *octx;
if (n != NULL
&& (n->value & (GOVD_DATA_SHARE_CLASS
& ~GOVD_FIRSTPRIVATE)))
remove = true;
else
for (octx = ctx->outer_context; octx;
octx = octx->outer_context)
{
n = splay_tree_lookup (octx->variables,
(splay_tree_key) decl);
if (n == NULL)
continue;
if (n->value & GOVD_LOCAL)
break;
/* We have to avoid assigning a shared variable
to itself when trying to add
__builtin_assume_aligned. */
if (n->value & GOVD_SHARED)
{
remove = true;
break;
}
}
}
}
else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
{
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
remove = true;
}
break;
case OMP_CLAUSE_NONTEMPORAL:
decl = OMP_CLAUSE_DECL (c);
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
remove = n == NULL || !(n->value & GOVD_SEEN);
break;
case OMP_CLAUSE_MAP:
if (code == OMP_TARGET_EXIT_DATA
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER)
{
remove = true;
break;
}
decl = OMP_CLAUSE_DECL (c);
/* Data clauses associated with reductions must be
compatible with present_or_copy. Warn and adjust the clause
if that is not the case. */
if (ctx->region_type == ORT_ACC_PARALLEL
|| ctx->region_type == ORT_ACC_SERIAL)
{
tree t = DECL_P (decl) ? decl : TREE_OPERAND (decl, 0);
n = NULL;
if (DECL_P (t))
n = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
if (n && (n->value & GOVD_REDUCTION))
{
enum gomp_map_kind kind = OMP_CLAUSE_MAP_KIND (c);
OMP_CLAUSE_MAP_IN_REDUCTION (c) = 1;
if ((kind & GOMP_MAP_TOFROM) != GOMP_MAP_TOFROM
&& kind != GOMP_MAP_FORCE_PRESENT
&& kind != GOMP_MAP_POINTER)
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"incompatible data clause with reduction "
"on %qE; promoting to %<present_or_copy%>",
DECL_NAME (t));
OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_TOFROM);
}
}
}
if (!DECL_P (decl))
{
if ((ctx->region_type & ORT_TARGET) != 0
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
{
if (TREE_CODE (decl) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE))
decl = TREE_OPERAND (decl, 0);
if (TREE_CODE (decl) == COMPONENT_REF)
{
while (TREE_CODE (decl) == COMPONENT_REF)
decl = TREE_OPERAND (decl, 0);
if (DECL_P (decl))
{
n = splay_tree_lookup (ctx->variables,
(splay_tree_key) decl);
if (!(n->value & GOVD_SEEN))
remove = true;
}
}
}
break;
}
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
if ((ctx->region_type & ORT_TARGET) != 0
&& !(n->value & GOVD_SEEN)
&& GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c)) == 0
&& (!is_global_var (decl)
|| !lookup_attribute ("omp declare target link",
DECL_ATTRIBUTES (decl))))
{
remove = true;
/* For struct element mapping, if struct is never referenced
in target block and none of the mapping has always modifier,
remove all the struct element mappings, which immediately
follow the GOMP_MAP_STRUCT map clause. */
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_STRUCT)
{
HOST_WIDE_INT cnt = tree_to_shwi (OMP_CLAUSE_SIZE (c));
while (cnt--)
OMP_CLAUSE_CHAIN (c)
= OMP_CLAUSE_CHAIN (OMP_CLAUSE_CHAIN (c));
}
}
else if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_STRUCT
&& (code == OMP_TARGET_EXIT_DATA
|| code == OACC_EXIT_DATA))
remove = true;
else if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_POINTER
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE))
{
/* For GOMP_MAP_FORCE_DEVICEPTR, we'll never enter here, because
for these, TREE_CODE (DECL_SIZE (decl)) will always be
INTEGER_CST. */
gcc_assert (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FORCE_DEVICEPTR);
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
tree mem = build_simple_mem_ref (decl2);
OMP_CLAUSE_DECL (c) = mem;
OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (ctx->outer_context)
{
omp_notice_variable (ctx->outer_context, decl2, true);
omp_notice_variable (ctx->outer_context,
OMP_CLAUSE_SIZE (c), true);
}
if (((ctx->region_type & ORT_TARGET) != 0
|| !ctx->target_firstprivatize_array_bases)
&& ((n->value & GOVD_SEEN) == 0
|| (n->value & (GOVD_PRIVATE | GOVD_FIRSTPRIVATE)) == 0))
{
tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
OMP_CLAUSE_DECL (nc) = decl;
OMP_CLAUSE_SIZE (nc) = size_zero_node;
if (ctx->target_firstprivatize_array_bases)
OMP_CLAUSE_SET_MAP_KIND (nc,
GOMP_MAP_FIRSTPRIVATE_POINTER);
else
OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_POINTER);
OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (c);
OMP_CLAUSE_CHAIN (c) = nc;
c = nc;
}
}
else
{
if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_SIZE_UNIT (decl);
gcc_assert ((n->value & GOVD_SEEN) == 0
|| ((n->value & (GOVD_PRIVATE | GOVD_FIRSTPRIVATE))
== 0));
}
break;
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE__CACHE_:
decl = OMP_CLAUSE_DECL (c);
if (!DECL_P (decl))
break;
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
tree mem = build_simple_mem_ref (decl2);
OMP_CLAUSE_DECL (c) = mem;
OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (ctx->outer_context)
{
omp_notice_variable (ctx->outer_context, decl2, true);
omp_notice_variable (ctx->outer_context,
OMP_CLAUSE_SIZE (c), true);
}
}
else if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_SIZE_UNIT (decl);
break;
case OMP_CLAUSE_REDUCTION:
if (OMP_CLAUSE_REDUCTION_INSCAN (c))
{
decl = OMP_CLAUSE_DECL (c);
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
if ((n->value & GOVD_REDUCTION_INSCAN) == 0)
{
remove = true;
error_at (OMP_CLAUSE_LOCATION (c),
"%qD specified in %<inscan%> %<reduction%> clause "
"but not in %<scan%> directive clause", decl);
break;
}
has_inscan_reductions = true;
}
/* FALLTHRU */
case OMP_CLAUSE_IN_REDUCTION:
case OMP_CLAUSE_TASK_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
/* OpenACC reductions need a present_or_copy data clause.
Add one if necessary. Emit error when the reduction is private. */
if (ctx->region_type == ORT_ACC_PARALLEL
|| ctx->region_type == ORT_ACC_SERIAL)
{
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
if (n->value & (GOVD_PRIVATE | GOVD_FIRSTPRIVATE))
{
remove = true;
error_at (OMP_CLAUSE_LOCATION (c), "invalid private "
"reduction on %qE", DECL_NAME (decl));
}
else if ((n->value & GOVD_MAP) == 0)
{
tree next = OMP_CLAUSE_CHAIN (c);
tree nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_MAP);
OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_TOFROM);
OMP_CLAUSE_DECL (nc) = decl;
OMP_CLAUSE_CHAIN (c) = nc;
lang_hooks.decls.omp_finish_clause (nc, pre_p);
while (1)
{
OMP_CLAUSE_MAP_IN_REDUCTION (nc) = 1;
if (OMP_CLAUSE_CHAIN (nc) == NULL)
break;
nc = OMP_CLAUSE_CHAIN (nc);
}
OMP_CLAUSE_CHAIN (nc) = next;
n->value |= GOVD_MAP;
}
}
if (DECL_P (decl)
&& omp_shared_to_firstprivate_optimizable_decl_p (decl))
omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
break;
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_HINT:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ORDER:
case OMP_CLAUSE_BIND:
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
case OMP_CLAUSE_INCLUSIVE:
case OMP_CLAUSE_EXCLUSIVE:
break;
default:
gcc_unreachable ();
}
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
/* Add in any implicit data sharing. */
struct gimplify_adjust_omp_clauses_data data;
data.list_p = list_p;
data.pre_p = pre_p;
splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, &data);
if (has_inscan_reductions)
for (c = *orig_list_p; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<inscan%> %<reduction%> clause used together with "
"%<linear%> clause for a variable other than loop "
"iterator");
break;
}
gimplify_omp_ctxp = ctx->outer_context;
delete_omp_context (ctx);
}
/* Return 0 if CONSTRUCTS selectors don't match the OpenMP context,
   -1 if unknown yet (simd is involved, won't be known until vectorization)
   and 1 if they do.  If SCORES is non-NULL, it should point to an array
   of at least 2*NCONSTRUCTS+2 ints, and will be filled with the positions
   of the CONSTRUCTS (position -1 if it will never match) followed by
   number of constructs in the OpenMP context construct trait.  If the
   score depends on whether it will be in a declare simd clone or not,
   the function returns 2 and there will be two sets of the scores, the first
   one for the case that it is not in a declare simd clone, the other
   that it is in a declare simd clone.  */

int
omp_construct_selector_matches (enum tree_code *constructs, int nconstructs,
				int *scores)
{
  /* MATCHED counts how many of CONSTRUCTS have been matched so far
     (they must match in order, innermost last); CNT counts all constructs
     present in the enclosing OpenMP context trait.  */
  int matched = 0, cnt = 0;
  bool simd_seen = false;
  bool target_seen = false;
  /* Index in the trait of the "maybe" simd construct contributed by an
     'omp declare simd' attribute, or -1 if there is none.  */
  int declare_simd_cnt = -1;
  /* When SCORES is requested, collect the trait's construct codes here
     (innermost first, as pushed) instead of matching eagerly.  */
  auto_vec<enum tree_code, 16> codes;
  /* Walk the gimplification contexts from innermost outwards, picking out
     the regions that contribute to the construct trait: parallel, target,
     teams, worksharing for, and simd (but not a loop-bound simd).  */
  for (struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp; ctx;)
    {
      if (((ctx->region_type & ORT_PARALLEL) && ctx->code == OMP_PARALLEL)
	  || ((ctx->region_type & (ORT_TARGET | ORT_IMPLICIT_TARGET | ORT_ACC))
	      == ORT_TARGET && ctx->code == OMP_TARGET)
	  || ((ctx->region_type & ORT_TEAMS) && ctx->code == OMP_TEAMS)
	  || (ctx->region_type == ORT_WORKSHARE && ctx->code == OMP_FOR)
	  || (ctx->region_type == ORT_SIMD
	      && ctx->code == OMP_SIMD
	      && !omp_find_clause (ctx->clauses, OMP_CLAUSE_BIND)))
	{
	  ++cnt;
	  if (scores)
	    codes.safe_push (ctx->code);
	  else if (matched < nconstructs && ctx->code == constructs[matched])
	    {
	      if (ctx->code == OMP_SIMD)
		{
		  /* simd must be the innermost matched construct;
		     anything matched before it means no match.  */
		  if (matched)
		    return 0;
		  simd_seen = true;
		}
	      ++matched;
	    }
	  if (ctx->code == OMP_TARGET)
	    {
	      /* Nothing outside of target contributes to the trait, so
	         the answer is decided here unless scores are wanted.  */
	      if (scores == NULL)
		return matched < nconstructs ? 0 : simd_seen ? -1 : 1;
	      target_seen = true;
	      break;
	    }
	}
      else if (ctx->region_type == ORT_WORKSHARE
	       && ctx->code == OMP_LOOP
	       && ctx->outer_context
	       && ctx->outer_context->region_type == ORT_COMBINED_PARALLEL
	       && ctx->outer_context->outer_context
	       && ctx->outer_context->outer_context->code == OMP_LOOP
	       && ctx->outer_context->outer_context->distribute)
	/* Skip over the parallel implicitly added for a combined
	   'loop' inside a distribute; it is not part of the trait.  */
	ctx = ctx->outer_context->outer_context;
      ctx = ctx->outer_context;
    }
  /* 'omp declare simd' adds a conditional simd construct: it applies only
     to the omp-simd-clone.c generated clones, not to the base function.  */
  if (!target_seen
      && lookup_attribute ("omp declare simd",
			   DECL_ATTRIBUTES (current_function_decl)))
    {
      /* Declare simd is a maybe case, it is supposed to be added only to the
	 omp-simd-clone.c added clones and not to the base function.  */
      declare_simd_cnt = cnt++;
      if (scores)
	codes.safe_push (OMP_SIMD);
      else if (cnt == 0
	       && constructs[0] == OMP_SIMD)
	{
	  /* NOTE(review): cnt was just post-incremented above, so cnt == 0
	     looks unreachable here — confirm intent against upstream.  */
	  gcc_assert (matched == 0);
	  simd_seen = true;
	  if (++matched == nconstructs)
	    return -1;
	}
    }
  /* A 'declare variant' variant function carries the constructs of its
     selector in an attribute; they extend the trait (unless already
     terminated by a target region).  */
  if (tree attr = lookup_attribute ("omp declare variant variant",
				    DECL_ATTRIBUTES (current_function_decl)))
    {
      enum tree_code variant_constructs[5];
      int variant_nconstructs = 0;
      if (!target_seen)
	variant_nconstructs
	  = omp_constructor_traits_to_codes (TREE_VALUE (attr),
					     variant_constructs);
      for (int i = 0; i < variant_nconstructs; i++)
	{
	  ++cnt;
	  if (scores)
	    codes.safe_push (variant_constructs[i]);
	  else if (matched < nconstructs
		   && variant_constructs[i] == constructs[matched])
	    {
	      if (variant_constructs[i] == OMP_SIMD)
		{
		  if (matched)
		    return 0;
		  simd_seen = true;
		}
	      ++matched;
	    }
	}
    }
  /* Functions marked 'omp declare target block' count as enclosed in a
     target construct.  */
  if (!target_seen
      && lookup_attribute ("omp declare target block",
			   DECL_ATTRIBUTES (current_function_decl)))
    {
      if (scores)
	codes.safe_push (OMP_TARGET);
      else if (matched < nconstructs && constructs[matched] == OMP_TARGET)
	++matched;
    }
  if (scores)
    {
      /* Fill SCORES: for each construct (outermost first in the output),
	 the highest trait position it can occupy, or -1 if it can never
	 match.  With a declare simd "maybe" construct two passes are run:
	 pass 0 excludes the maybe simd, pass 1 includes it.  */
      for (int pass = 0; pass < (declare_simd_cnt == -1 ? 1 : 2); pass++)
	{
	  int j = codes.length () - 1;
	  for (int i = nconstructs - 1; i >= 0; i--)
	    {
	      while (j >= 0
		     && (pass != 0 || declare_simd_cnt != j)
		     && constructs[i] != codes[j])
		--j;
	      if (pass == 0 && declare_simd_cnt != -1 && j > declare_simd_cnt)
		*scores++ = j - 1;
	      else
		*scores++ = j;
	    }
	  /* Trailing entry per pass: total number of constructs in the
	     trait for that pass.  */
	  *scores++ = ((pass == 0 && declare_simd_cnt != -1)
		       ? codes.length () - 1 : codes.length ());
	}
      return declare_simd_cnt == -1 ? 1 : 2;
    }
  if (matched == nconstructs)
    return simd_seen ? -1 : 1;
  return 0;
}
/* Gimplify an OACC_CACHE directive: scan and adjust its clause list,
   then drop the directive itself.  */

static void
gimplify_oacc_cache (tree *expr_p, gimple_seq *pre_p)
{
  tree *clauses_p = &OACC_CACHE_CLAUSES (*expr_p);

  gimplify_scan_omp_clauses (clauses_p, pre_p, ORT_ACC, OACC_CACHE);
  gimplify_adjust_omp_clauses (pre_p, NULL, clauses_p, OACC_CACHE);

  /* TODO: Do something sensible with this information.  */
  *expr_p = NULL_TREE;
}
/* Helper for gimplify_oacc_declare.  Translate the map kind of CLAUSE
   into an 'entry' kind (written back into CLAUSE) and, when required,
   build and return a new map clause carrying the matching 'exit' kind.
   Returns NULL_TREE when no exit-time action is needed.  */

static tree
gimplify_oacc_declare_1 (tree clause)
{
  HOST_WIDE_INT exit_op = 0;
  bool need_exit = false;

  switch (OMP_CLAUSE_MAP_KIND (clause))
    {
    case GOMP_MAP_ALLOC:
      /* Entry kind is unchanged; the exit releases the mapping.  */
      exit_op = GOMP_MAP_RELEASE;
      need_exit = true;
      break;

    case GOMP_MAP_FROM:
      OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_FORCE_ALLOC);
      exit_op = GOMP_MAP_FROM;
      need_exit = true;
      break;

    case GOMP_MAP_TOFROM:
      OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_TO);
      exit_op = GOMP_MAP_FROM;
      need_exit = true;
      break;

    case GOMP_MAP_DEVICE_RESIDENT:
    case GOMP_MAP_FORCE_DEVICEPTR:
    case GOMP_MAP_FORCE_PRESENT:
    case GOMP_MAP_LINK:
    case GOMP_MAP_POINTER:
    case GOMP_MAP_TO:
      /* These kinds need no exit-time counterpart.  */
      break;

    default:
      gcc_unreachable ();
      break;
    }

  if (!need_exit)
    return NULL;

  tree exit_clause = build_omp_clause (OMP_CLAUSE_LOCATION (clause),
				       OMP_CLAUSE_MAP);
  OMP_CLAUSE_SET_MAP_KIND (exit_clause, exit_op);
  OMP_CLAUSE_DECL (exit_clause) = OMP_CLAUSE_DECL (clause);
  return exit_clause;
}
/* Gimplify an OACC_DECLARE directive.  Marks the referenced variables as
   OpenACC-declared, records exit-time map clauses for function-local
   variables, and emits a GIMPLE_OMP_TARGET of kind OACC_DECLARE.  */

static void
gimplify_oacc_declare (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  tree clauses = OACC_DECLARE_CLAUSES (expr);

  gimplify_scan_omp_clauses (&clauses, pre_p, ORT_TARGET_DATA, OACC_DECLARE);
  gimplify_adjust_omp_clauses (pre_p, NULL, &clauses, OACC_DECLARE);

  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree decl = OMP_CLAUSE_DECL (c);

      /* Strip a MEM_REF wrapper to get at the underlying variable.  */
      if (TREE_CODE (decl) == MEM_REF)
	decl = TREE_OPERAND (decl, 0);

      if (VAR_P (decl) && !is_oacc_declared (decl))
	DECL_ATTRIBUTES (decl)
	  = tree_cons (get_identifier ("oacc declare target"), NULL_TREE,
		       DECL_ATTRIBUTES (decl));

      /* Function-local variables may need a matching unmapping action
	 when the function returns; remember it in oacc_declare_returns.  */
      if (VAR_P (decl)
	  && !is_global_var (decl)
	  && DECL_CONTEXT (decl) == current_function_decl)
	{
	  tree exit_clause = gimplify_oacc_declare_1 (c);
	  if (exit_clause)
	    {
	      if (oacc_declare_returns == NULL)
		oacc_declare_returns = new hash_map<tree, tree>;
	      oacc_declare_returns->put (decl, exit_clause);
	    }
	}

      if (gimplify_omp_ctxp)
	omp_add_variable (gimplify_omp_ctxp, decl, GOVD_SEEN);
    }

  gomp_target *stmt
    = gimple_build_omp_target (NULL, GF_OMP_TARGET_KIND_OACC_DECLARE,
			       clauses);
  gimplify_seq_add_stmt (pre_p, stmt);
  *expr_p = NULL_TREE;
}
/* Gimplify the contents of an OMP_PARALLEL statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static void
gimplify_omp_parallel (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gimple_seq body = NULL;

  /* A combined construct (e.g. 'parallel for') gets its own region type.  */
  enum omp_region_type region = (OMP_PARALLEL_COMBINED (expr)
				 ? ORT_COMBINED_PARALLEL : ORT_PARALLEL);
  gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p, region,
			     OMP_PARALLEL);

  push_gimplify_context ();

  gimple *body_stmt
    = gimplify_and_return_first (OMP_PARALLEL_BODY (expr), &body);
  /* Hand the bind back to the context pop only if the body produced one.  */
  pop_gimplify_context (gimple_code (body_stmt) == GIMPLE_BIND
			? body_stmt : NULL);

  gimplify_adjust_omp_clauses (pre_p, body, &OMP_PARALLEL_CLAUSES (expr),
			       OMP_PARALLEL);

  gimple *par_stmt = gimple_build_omp_parallel (body,
						OMP_PARALLEL_CLAUSES (expr),
						NULL_TREE, NULL_TREE);
  if (OMP_PARALLEL_COMBINED (expr))
    gimple_omp_set_subcode (par_stmt, GF_OMP_PARALLEL_COMBINED);
  gimplify_seq_add_stmt (pre_p, par_stmt);
  *expr_p = NULL_TREE;
}
/* Gimplify the contents of an OMP_TASK statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static void
gimplify_omp_task (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gimple_seq body = NULL;
  /* A bodyless OMP_TASK represents a 'taskwait' with depend clauses.  */
  bool taskwait_p = OMP_TASK_BODY (expr) == NULL_TREE;

  /* mutexinoutset dependencies are not valid on taskwait.  */
  if (taskwait_p)
    for (tree c = OMP_TASK_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	  && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_MUTEXINOUTSET)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%<mutexinoutset%> kind in %<depend%> clause on a "
		    "%<taskwait%> construct");
	  break;
	}

  bool untied_p = (omp_find_clause (OMP_TASK_CLAUSES (expr),
				    OMP_CLAUSE_UNTIED) != NULL_TREE);
  gimplify_scan_omp_clauses (&OMP_TASK_CLAUSES (expr), pre_p,
			     untied_p ? ORT_UNTIED_TASK : ORT_TASK, OMP_TASK);

  if (!taskwait_p)
    {
      push_gimplify_context ();
      gimple *body_stmt
	= gimplify_and_return_first (OMP_TASK_BODY (expr), &body);
      /* Hand the bind back to the context pop only if one was produced.  */
      pop_gimplify_context (gimple_code (body_stmt) == GIMPLE_BIND
			    ? body_stmt : NULL);
    }

  gimplify_adjust_omp_clauses (pre_p, body, &OMP_TASK_CLAUSES (expr),
			       OMP_TASK);

  gimple *task_stmt = gimple_build_omp_task (body,
					     OMP_TASK_CLAUSES (expr),
					     NULL_TREE, NULL_TREE,
					     NULL_TREE, NULL_TREE, NULL_TREE);
  if (taskwait_p)
    gimple_omp_task_set_taskwait_p (task_stmt, true);
  gimplify_seq_add_stmt (pre_p, task_stmt);
  *expr_p = NULL_TREE;
}
/* Gimplify the gross structure of an OMP_FOR statement. */
static enum gimplify_status
gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
{
tree for_stmt, orig_for_stmt, inner_for_stmt = NULL_TREE, decl, var, t;
enum gimplify_status ret = GS_ALL_DONE;
enum gimplify_status tret;
gomp_for *gfor;
gimple_seq for_body, for_pre_body;
int i;
bitmap has_decl_expr = NULL;
enum omp_region_type ort = ORT_WORKSHARE;
orig_for_stmt = for_stmt = *expr_p;
bool loop_p = (omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_BIND)
!= NULL_TREE);
if (OMP_FOR_INIT (for_stmt) == NULL_TREE)
{
tree *data[4] = { NULL, NULL, NULL, NULL };
gcc_assert (TREE_CODE (for_stmt) != OACC_LOOP);
inner_for_stmt = walk_tree (&OMP_FOR_BODY (for_stmt),
find_combined_omp_for, data, NULL);
if (inner_for_stmt == NULL_TREE)
{
gcc_assert (seen_error ());
*expr_p = NULL_TREE;
return GS_ERROR;
}
if (data[2] && OMP_FOR_PRE_BODY (*data[2]))
{
append_to_statement_list_force (OMP_FOR_PRE_BODY (*data[2]),
&OMP_FOR_PRE_BODY (for_stmt));
OMP_FOR_PRE_BODY (*data[2]) = NULL_TREE;
}
if (OMP_FOR_PRE_BODY (inner_for_stmt))
{
append_to_statement_list_force (OMP_FOR_PRE_BODY (inner_for_stmt),
&OMP_FOR_PRE_BODY (for_stmt));
OMP_FOR_PRE_BODY (inner_for_stmt) = NULL_TREE;
}
if (data[0])
{
/* We have some statements or variable declarations in between
the composite construct directives. Move them around the
inner_for_stmt. */
data[0] = expr_p;
for (i = 0; i < 3; i++)
if (data[i])
{
tree t = *data[i];
if (i < 2 && data[i + 1] == &OMP_BODY (t))
data[i + 1] = data[i];
*data[i] = OMP_BODY (t);
tree body = build3 (BIND_EXPR, void_type_node, NULL_TREE,
NULL_TREE, make_node (BLOCK));
OMP_BODY (t) = body;
append_to_statement_list_force (inner_for_stmt,
&BIND_EXPR_BODY (body));
*data[3] = t;
data[3] = tsi_stmt_ptr (tsi_start (BIND_EXPR_BODY (body)));
gcc_assert (*data[3] == inner_for_stmt);
}
return GS_OK;
}
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (inner_for_stmt)); i++)
if (!loop_p
&& OMP_FOR_ORIG_DECLS (inner_for_stmt)
&& TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt),
i)) == TREE_LIST
&& TREE_PURPOSE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt),
i)))
{
tree orig = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt), i);
/* Class iterators aren't allowed on OMP_SIMD, so the only
case we need to solve is distribute parallel for. They are
allowed on the loop construct, but that is already handled
in gimplify_omp_loop. */
gcc_assert (TREE_CODE (inner_for_stmt) == OMP_FOR
&& TREE_CODE (for_stmt) == OMP_DISTRIBUTE
&& data[1]);
tree orig_decl = TREE_PURPOSE (orig);
tree last = TREE_VALUE (orig);
tree *pc;
for (pc = &OMP_FOR_CLAUSES (inner_for_stmt);
*pc; pc = &OMP_CLAUSE_CHAIN (*pc))
if ((OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_PRIVATE
|| OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LASTPRIVATE)
&& OMP_CLAUSE_DECL (*pc) == orig_decl)
break;
if (*pc == NULL_TREE)
{
tree *spc;
for (spc = &OMP_PARALLEL_CLAUSES (*data[1]);
*spc; spc = &OMP_CLAUSE_CHAIN (*spc))
if (OMP_CLAUSE_CODE (*spc) == OMP_CLAUSE_PRIVATE
&& OMP_CLAUSE_DECL (*spc) == orig_decl)
break;
if (*spc)
{
tree c = *spc;
*spc = OMP_CLAUSE_CHAIN (c);
OMP_CLAUSE_CHAIN (c) = NULL_TREE;
*pc = c;
}
}
if (*pc == NULL_TREE)
;
else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_PRIVATE)
{
/* private clause will appear only on inner_for_stmt.
Change it into firstprivate, and add private clause
on for_stmt. */
tree c = copy_node (*pc);
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c;
OMP_CLAUSE_CODE (*pc) = OMP_CLAUSE_FIRSTPRIVATE;
lang_hooks.decls.omp_finish_clause (*pc, pre_p);
}
else
{
/* lastprivate clause will appear on both inner_for_stmt
and for_stmt. Add firstprivate clause to
inner_for_stmt. */
tree c = build_omp_clause (OMP_CLAUSE_LOCATION (*pc),
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (*pc);
OMP_CLAUSE_CHAIN (c) = *pc;
*pc = c;
lang_hooks.decls.omp_finish_clause (*pc, pre_p);
}
tree c = build_omp_clause (UNKNOWN_LOCATION,
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = last;
OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (*data[1]);
OMP_PARALLEL_CLAUSES (*data[1]) = c;
c = build_omp_clause (UNKNOWN_LOCATION,
*pc ? OMP_CLAUSE_SHARED
: OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = orig_decl;
OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (*data[1]);
OMP_PARALLEL_CLAUSES (*data[1]) = c;
}
/* Similarly, take care of C++ range for temporaries, those should
be firstprivate on OMP_PARALLEL if any. */
if (data[1])
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (inner_for_stmt)); i++)
if (OMP_FOR_ORIG_DECLS (inner_for_stmt)
&& TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt),
i)) == TREE_LIST
&& TREE_CHAIN (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt),
i)))
{
tree orig
= TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt), i);
tree v = TREE_CHAIN (orig);
tree c = build_omp_clause (UNKNOWN_LOCATION,
OMP_CLAUSE_FIRSTPRIVATE);
/* First add firstprivate clause for the __for_end artificial
decl. */
OMP_CLAUSE_DECL (c) = TREE_VEC_ELT (v, 1);
if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c)))
== REFERENCE_TYPE)
OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c) = 1;
OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (*data[1]);
OMP_PARALLEL_CLAUSES (*data[1]) = c;
if (TREE_VEC_ELT (v, 0))
{
/* And now the same for __for_range artificial decl if it
exists. */
c = build_omp_clause (UNKNOWN_LOCATION,
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = TREE_VEC_ELT (v, 0);
if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c)))
== REFERENCE_TYPE)
OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c) = 1;
OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (*data[1]);
OMP_PARALLEL_CLAUSES (*data[1]) = c;
}
}
}
switch (TREE_CODE (for_stmt))
{
case OMP_FOR:
case OMP_DISTRIBUTE:
break;
case OACC_LOOP:
ort = ORT_ACC;
break;
case OMP_TASKLOOP:
if (omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_UNTIED))
ort = ORT_UNTIED_TASKLOOP;
else
ort = ORT_TASKLOOP;
break;
case OMP_SIMD:
ort = ORT_SIMD;
break;
default:
gcc_unreachable ();
}
/* Set OMP_CLAUSE_LINEAR_NO_COPYIN flag on explicit linear
clause for the IV. */
if (ort == ORT_SIMD && TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), 0);
gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
decl = TREE_OPERAND (t, 0);
for (tree c = OMP_FOR_CLAUSES (for_stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_DECL (c) == decl)
{
OMP_CLAUSE_LINEAR_NO_COPYIN (c) = 1;
break;
}
}
if (TREE_CODE (for_stmt) != OMP_TASKLOOP)
gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p, ort,
loop_p && TREE_CODE (for_stmt) != OMP_SIMD
? OMP_LOOP : TREE_CODE (for_stmt));
if (TREE_CODE (for_stmt) == OMP_DISTRIBUTE)
gimplify_omp_ctxp->distribute = true;
/* Handle OMP_FOR_INIT. */
for_pre_body = NULL;
if ((ort == ORT_SIMD
|| (inner_for_stmt && TREE_CODE (inner_for_stmt) == OMP_SIMD))
&& OMP_FOR_PRE_BODY (for_stmt))
{
has_decl_expr = BITMAP_ALLOC (NULL);
if (TREE_CODE (OMP_FOR_PRE_BODY (for_stmt)) == DECL_EXPR
&& TREE_CODE (DECL_EXPR_DECL (OMP_FOR_PRE_BODY (for_stmt)))
== VAR_DECL)
{
t = OMP_FOR_PRE_BODY (for_stmt);
bitmap_set_bit (has_decl_expr, DECL_UID (DECL_EXPR_DECL (t)));
}
else if (TREE_CODE (OMP_FOR_PRE_BODY (for_stmt)) == STATEMENT_LIST)
{
tree_stmt_iterator si;
for (si = tsi_start (OMP_FOR_PRE_BODY (for_stmt)); !tsi_end_p (si);
tsi_next (&si))
{
t = tsi_stmt (si);
if (TREE_CODE (t) == DECL_EXPR
&& TREE_CODE (DECL_EXPR_DECL (t)) == VAR_DECL)
bitmap_set_bit (has_decl_expr, DECL_UID (DECL_EXPR_DECL (t)));
}
}
}
if (OMP_FOR_PRE_BODY (for_stmt))
{
if (TREE_CODE (for_stmt) != OMP_TASKLOOP || gimplify_omp_ctxp)
gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body);
else
{
struct gimplify_omp_ctx ctx;
memset (&ctx, 0, sizeof (ctx));
ctx.region_type = ORT_NONE;
gimplify_omp_ctxp = &ctx;
gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body);
gimplify_omp_ctxp = NULL;
}
}
OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE;
if (OMP_FOR_INIT (for_stmt) == NULL_TREE)
for_stmt = inner_for_stmt;
/* For taskloop, need to gimplify the start, end and step before the
taskloop, outside of the taskloop omp context. */
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP)
{
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
if (!is_gimple_constant (TREE_OPERAND (t, 1)))
{
tree type = TREE_TYPE (TREE_OPERAND (t, 0));
TREE_OPERAND (t, 1)
= get_initialized_tmp_var (TREE_OPERAND (t, 1),
gimple_seq_empty_p (for_pre_body)
? pre_p : &for_pre_body, NULL,
false);
/* Reference to pointer conversion is considered useless,
but is significant for firstprivate clause. Force it
here. */
if (TREE_CODE (type) == POINTER_TYPE
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 1)))
== REFERENCE_TYPE))
{
tree v = create_tmp_var (TYPE_MAIN_VARIANT (type));
tree m = build2 (INIT_EXPR, TREE_TYPE (v), v,
TREE_OPERAND (t, 1));
gimplify_and_add (m, gimple_seq_empty_p (for_pre_body)
? pre_p : &for_pre_body);
TREE_OPERAND (t, 1) = v;
}
tree c = build_omp_clause (input_location,
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = TREE_OPERAND (t, 1);
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (orig_for_stmt);
OMP_FOR_CLAUSES (orig_for_stmt) = c;
}
/* Handle OMP_FOR_COND. */
t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
if (!is_gimple_constant (TREE_OPERAND (t, 1)))
{
tree type = TREE_TYPE (TREE_OPERAND (t, 0));
TREE_OPERAND (t, 1)
= get_initialized_tmp_var (TREE_OPERAND (t, 1),
gimple_seq_empty_p (for_pre_body)
? pre_p : &for_pre_body, NULL,
false);
/* Reference to pointer conversion is considered useless,
but is significant for firstprivate clause. Force it
here. */
if (TREE_CODE (type) == POINTER_TYPE
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 1)))
== REFERENCE_TYPE))
{
tree v = create_tmp_var (TYPE_MAIN_VARIANT (type));
tree m = build2 (INIT_EXPR, TREE_TYPE (v), v,
TREE_OPERAND (t, 1));
gimplify_and_add (m, gimple_seq_empty_p (for_pre_body)
? pre_p : &for_pre_body);
TREE_OPERAND (t, 1) = v;
}
tree c = build_omp_clause (input_location,
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = TREE_OPERAND (t, 1);
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (orig_for_stmt);
OMP_FOR_CLAUSES (orig_for_stmt) = c;
}
/* Handle OMP_FOR_INCR. */
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
if (TREE_CODE (t) == MODIFY_EXPR)
{
decl = TREE_OPERAND (t, 0);
t = TREE_OPERAND (t, 1);
tree *tp = &TREE_OPERAND (t, 1);
if (TREE_CODE (t) == PLUS_EXPR && *tp == decl)
tp = &TREE_OPERAND (t, 0);
if (!is_gimple_constant (*tp))
{
gimple_seq *seq = gimple_seq_empty_p (for_pre_body)
? pre_p : &for_pre_body;
*tp = get_initialized_tmp_var (*tp, seq, NULL, false);
tree c = build_omp_clause (input_location,
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = *tp;
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (orig_for_stmt);
OMP_FOR_CLAUSES (orig_for_stmt) = c;
}
}
}
gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (orig_for_stmt), pre_p, ort,
OMP_TASKLOOP);
}
if (orig_for_stmt != for_stmt)
gimplify_omp_ctxp->combined_loop = true;
for_body = NULL;
gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
== TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt)));
gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
== TREE_VEC_LENGTH (OMP_FOR_INCR (for_stmt)));
tree c = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_ORDERED);
bool is_doacross = false;
if (c && OMP_CLAUSE_ORDERED_EXPR (c))
{
is_doacross = true;
gimplify_omp_ctxp->loop_iter_var.create (TREE_VEC_LENGTH
(OMP_FOR_INIT (for_stmt))
* 2);
}
int collapse = 1, tile = 0;
c = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_COLLAPSE);
if (c)
collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (c));
c = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_TILE);
if (c)
tile = list_length (OMP_CLAUSE_TILE_LIST (c));
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
decl = TREE_OPERAND (t, 0);
gcc_assert (DECL_P (decl));
gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl))
|| POINTER_TYPE_P (TREE_TYPE (decl)));
if (is_doacross)
{
if (TREE_CODE (for_stmt) == OMP_FOR && OMP_FOR_ORIG_DECLS (for_stmt))
{
tree orig_decl = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i);
if (TREE_CODE (orig_decl) == TREE_LIST)
{
orig_decl = TREE_PURPOSE (orig_decl);
if (!orig_decl)
orig_decl = decl;
}
gimplify_omp_ctxp->loop_iter_var.quick_push (orig_decl);
}
else
gimplify_omp_ctxp->loop_iter_var.quick_push (decl);
gimplify_omp_ctxp->loop_iter_var.quick_push (decl);
}
/* Make sure the iteration variable is private. */
tree c = NULL_TREE;
tree c2 = NULL_TREE;
if (orig_for_stmt != for_stmt)
{
/* Preserve this information until we gimplify the inner simd. */
if (has_decl_expr
&& bitmap_bit_p (has_decl_expr, DECL_UID (decl)))
TREE_PRIVATE (t) = 1;
}
else if (ort == ORT_SIMD)
{
splay_tree_node n = splay_tree_lookup (gimplify_omp_ctxp->variables,
(splay_tree_key) decl);
omp_is_private (gimplify_omp_ctxp, decl,
1 + (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
!= 1));
if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
{
omp_notice_variable (gimplify_omp_ctxp, decl, true);
if (n->value & GOVD_LASTPRIVATE_CONDITIONAL)
for (tree c3 = omp_find_clause (OMP_FOR_CLAUSES (for_stmt),
OMP_CLAUSE_LASTPRIVATE);
c3; c3 = omp_find_clause (OMP_CLAUSE_CHAIN (c3),
OMP_CLAUSE_LASTPRIVATE))
if (OMP_CLAUSE_DECL (c3) == decl)
{
warning_at (OMP_CLAUSE_LOCATION (c3), 0,
"conditional %<lastprivate%> on loop "
"iterator %qD ignored", decl);
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c3) = 0;
n->value &= ~GOVD_LASTPRIVATE_CONDITIONAL;
}
}
else if (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1 && !loop_p)
{
c = build_omp_clause (input_location, OMP_CLAUSE_LINEAR);
OMP_CLAUSE_LINEAR_NO_COPYIN (c) = 1;
unsigned int flags = GOVD_LINEAR | GOVD_EXPLICIT | GOVD_SEEN;
if ((has_decl_expr
&& bitmap_bit_p (has_decl_expr, DECL_UID (decl)))
|| TREE_PRIVATE (t))
{
OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
struct gimplify_omp_ctx *outer
= gimplify_omp_ctxp->outer_context;
if (outer && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
{
if (outer->region_type == ORT_WORKSHARE
&& outer->combined_loop)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n != NULL && (n->value & GOVD_LOCAL) != 0)
{
OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
else
{
struct gimplify_omp_ctx *octx = outer->outer_context;
if (octx
&& octx->region_type == ORT_COMBINED_PARALLEL
&& octx->outer_context
&& (octx->outer_context->region_type
== ORT_WORKSHARE)
&& octx->outer_context->combined_loop)
{
octx = octx->outer_context;
n = splay_tree_lookup (octx->variables,
(splay_tree_key)decl);
if (n != NULL && (n->value & GOVD_LOCAL) != 0)
{
OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
}
}
}
}
OMP_CLAUSE_DECL (c) = decl;
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c;
omp_add_variable (gimplify_omp_ctxp, decl, flags);
if (outer && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
{
if (outer->region_type == ORT_WORKSHARE
&& outer->combined_loop)
{
if (outer->outer_context
&& (outer->outer_context->region_type
== ORT_COMBINED_PARALLEL))
outer = outer->outer_context;
else if (omp_check_private (outer, decl, false))
outer = NULL;
}
else if (((outer->region_type & ORT_TASKLOOP)
== ORT_TASKLOOP)
&& outer->combined_loop
&& !omp_check_private (gimplify_omp_ctxp,
decl, false))
;
else if (outer->region_type != ORT_COMBINED_PARALLEL)
{
omp_notice_variable (outer, decl, true);
outer = NULL;
}
if (outer)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0)
{
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer->region_type == ORT_COMBINED_PARALLEL
&& outer->outer_context
&& (outer->outer_context->region_type
== ORT_WORKSHARE)
&& outer->outer_context->combined_loop)
{
outer = outer->outer_context;
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (omp_check_private (outer, decl, false))
outer = NULL;
else if (n == NULL
|| ((n->value & GOVD_DATA_SHARE_CLASS)
== 0))
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE
| GOVD_SEEN);
else
outer = NULL;
}
if (outer && outer->outer_context
&& ((outer->outer_context->region_type
& ORT_COMBINED_TEAMS) == ORT_COMBINED_TEAMS
|| (((outer->region_type & ORT_TASKLOOP)
== ORT_TASKLOOP)
&& (outer->outer_context->region_type
== ORT_COMBINED_PARALLEL))))
{
outer = outer->outer_context;
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL
|| (n->value & GOVD_DATA_SHARE_CLASS) == 0)
omp_add_variable (outer, decl,
GOVD_SHARED | GOVD_SEEN);
else
outer = NULL;
}
if (outer && outer->outer_context)
omp_notice_variable (outer->outer_context, decl,
true);
}
}
}
}
else
{
bool lastprivate
= (!has_decl_expr
|| !bitmap_bit_p (has_decl_expr, DECL_UID (decl)));
if (TREE_PRIVATE (t))
lastprivate = false;
if (loop_p && OMP_FOR_ORIG_DECLS (for_stmt))
{
tree elt = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i);
if (TREE_CODE (elt) == TREE_LIST && TREE_PURPOSE (elt))
lastprivate = false;
}
struct gimplify_omp_ctx *outer
= gimplify_omp_ctxp->outer_context;
if (outer && lastprivate)
{
if (outer->region_type == ORT_WORKSHARE
&& outer->combined_loop)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n != NULL && (n->value & GOVD_LOCAL) != 0)
{
lastprivate = false;
outer = NULL;
}
else if (outer->outer_context
&& (outer->outer_context->region_type
== ORT_COMBINED_PARALLEL))
outer = outer->outer_context;
else if (omp_check_private (outer, decl, false))
outer = NULL;
}
else if (((outer->region_type & ORT_TASKLOOP)
== ORT_TASKLOOP)
&& outer->combined_loop
&& !omp_check_private (gimplify_omp_ctxp,
decl, false))
;
else if (outer->region_type != ORT_COMBINED_PARALLEL)
{
omp_notice_variable (outer, decl, true);
outer = NULL;
}
if (outer)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0)
{
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer->region_type == ORT_COMBINED_PARALLEL
&& outer->outer_context
&& (outer->outer_context->region_type
== ORT_WORKSHARE)
&& outer->outer_context->combined_loop)
{
outer = outer->outer_context;
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (omp_check_private (outer, decl, false))
outer = NULL;
else if (n == NULL
|| ((n->value & GOVD_DATA_SHARE_CLASS)
== 0))
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE
| GOVD_SEEN);
else
outer = NULL;
}
if (outer && outer->outer_context
&& ((outer->outer_context->region_type
& ORT_COMBINED_TEAMS) == ORT_COMBINED_TEAMS
|| (((outer->region_type & ORT_TASKLOOP)
== ORT_TASKLOOP)
&& (outer->outer_context->region_type
== ORT_COMBINED_PARALLEL))))
{
outer = outer->outer_context;
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL
|| (n->value & GOVD_DATA_SHARE_CLASS) == 0)
omp_add_variable (outer, decl,
GOVD_SHARED | GOVD_SEEN);
else
outer = NULL;
}
if (outer && outer->outer_context)
omp_notice_variable (outer->outer_context, decl,
true);
}
}
}
c = build_omp_clause (input_location,
lastprivate ? OMP_CLAUSE_LASTPRIVATE
: OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_DECL (c) = decl;
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c;
omp_add_variable (gimplify_omp_ctxp, decl,
(lastprivate ? GOVD_LASTPRIVATE : GOVD_PRIVATE)
| GOVD_EXPLICIT | GOVD_SEEN);
c = NULL_TREE;
}
}
else if (omp_is_private (gimplify_omp_ctxp, decl, 0))
{
omp_notice_variable (gimplify_omp_ctxp, decl, true);
splay_tree_node n = splay_tree_lookup (gimplify_omp_ctxp->variables,
(splay_tree_key) decl);
if (n && (n->value & GOVD_LASTPRIVATE_CONDITIONAL))
for (tree c3 = omp_find_clause (OMP_FOR_CLAUSES (for_stmt),
OMP_CLAUSE_LASTPRIVATE);
c3; c3 = omp_find_clause (OMP_CLAUSE_CHAIN (c3),
OMP_CLAUSE_LASTPRIVATE))
if (OMP_CLAUSE_DECL (c3) == decl)
{
warning_at (OMP_CLAUSE_LOCATION (c3), 0,
"conditional %<lastprivate%> on loop "
"iterator %qD ignored", decl);
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c3) = 0;
n->value &= ~GOVD_LASTPRIVATE_CONDITIONAL;
}
}
else
omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);
/* If DECL is not a gimple register, create a temporary variable to act
as an iteration counter. This is valid, since DECL cannot be
modified in the body of the loop. Similarly for any iteration vars
in simd with collapse > 1 where the iterator vars must be
lastprivate. */
if (orig_for_stmt != for_stmt)
var = decl;
else if (!is_gimple_reg (decl)
|| (ort == ORT_SIMD
&& TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) > 1))
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
/* Make sure omp_add_variable is not called on it prematurely.
We call it ourselves a few lines later. */
gimplify_omp_ctxp = NULL;
var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
gimplify_omp_ctxp = ctx;
TREE_OPERAND (t, 0) = var;
gimplify_seq_add_stmt (&for_body, gimple_build_assign (decl, var));
if (ort == ORT_SIMD
&& TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1)
{
c2 = build_omp_clause (input_location, OMP_CLAUSE_LINEAR);
OMP_CLAUSE_LINEAR_NO_COPYIN (c2) = 1;
OMP_CLAUSE_LINEAR_NO_COPYOUT (c2) = 1;
OMP_CLAUSE_DECL (c2) = var;
OMP_CLAUSE_CHAIN (c2) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c2;
omp_add_variable (gimplify_omp_ctxp, var,
GOVD_LINEAR | GOVD_EXPLICIT | GOVD_SEEN);
if (c == NULL_TREE)
{
c = c2;
c2 = NULL_TREE;
}
}
else
omp_add_variable (gimplify_omp_ctxp, var,
GOVD_PRIVATE | GOVD_SEEN);
}
else
var = decl;
gimplify_omp_ctxp->in_for_exprs = true;
tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
is_gimple_val, fb_rvalue, false);
gimplify_omp_ctxp->in_for_exprs = false;
ret = MIN (ret, tret);
if (ret == GS_ERROR)
return ret;
/* Handle OMP_FOR_COND. */
t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
gcc_assert (COMPARISON_CLASS_P (t));
gcc_assert (TREE_OPERAND (t, 0) == decl);
gimplify_omp_ctxp->in_for_exprs = true;
tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
is_gimple_val, fb_rvalue, false);
gimplify_omp_ctxp->in_for_exprs = false;
ret = MIN (ret, tret);
/* Handle OMP_FOR_INCR. */
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
switch (TREE_CODE (t))
{
case PREINCREMENT_EXPR:
case POSTINCREMENT_EXPR:
{
tree decl = TREE_OPERAND (t, 0);
/* c_omp_for_incr_canonicalize_ptr() should have been
called to massage things appropriately. */
gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
if (orig_for_stmt != for_stmt)
break;
t = build_int_cst (TREE_TYPE (decl), 1);
if (c)
OMP_CLAUSE_LINEAR_STEP (c) = t;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
break;
}
case PREDECREMENT_EXPR:
case POSTDECREMENT_EXPR:
/* c_omp_for_incr_canonicalize_ptr() should have been
called to massage things appropriately. */
gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
if (orig_for_stmt != for_stmt)
break;
t = build_int_cst (TREE_TYPE (decl), -1);
if (c)
OMP_CLAUSE_LINEAR_STEP (c) = t;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
break;
case MODIFY_EXPR:
gcc_assert (TREE_OPERAND (t, 0) == decl);
TREE_OPERAND (t, 0) = var;
t = TREE_OPERAND (t, 1);
switch (TREE_CODE (t))
{
case PLUS_EXPR:
if (TREE_OPERAND (t, 1) == decl)
{
TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
TREE_OPERAND (t, 0) = var;
break;
}
/* Fallthru. */
case MINUS_EXPR:
case POINTER_PLUS_EXPR:
gcc_assert (TREE_OPERAND (t, 0) == decl);
TREE_OPERAND (t, 0) = var;
break;
default:
gcc_unreachable ();
}
gimplify_omp_ctxp->in_for_exprs = true;
tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
is_gimple_val, fb_rvalue, false);
ret = MIN (ret, tret);
if (c)
{
tree step = TREE_OPERAND (t, 1);
tree stept = TREE_TYPE (decl);
if (POINTER_TYPE_P (stept))
stept = sizetype;
step = fold_convert (stept, step);
if (TREE_CODE (t) == MINUS_EXPR)
step = fold_build1 (NEGATE_EXPR, stept, step);
OMP_CLAUSE_LINEAR_STEP (c) = step;
if (step != TREE_OPERAND (t, 1))
{
tret = gimplify_expr (&OMP_CLAUSE_LINEAR_STEP (c),
&for_pre_body, NULL,
is_gimple_val, fb_rvalue, false);
ret = MIN (ret, tret);
}
}
gimplify_omp_ctxp->in_for_exprs = false;
break;
default:
gcc_unreachable ();
}
if (c2)
{
gcc_assert (c);
OMP_CLAUSE_LINEAR_STEP (c2) = OMP_CLAUSE_LINEAR_STEP (c);
}
if ((var != decl || collapse > 1 || tile) && orig_for_stmt == for_stmt)
{
for (c = OMP_FOR_CLAUSES (for_stmt); c ; c = OMP_CLAUSE_CHAIN (c))
if (((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) == NULL)
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) == NULL))
&& OMP_CLAUSE_DECL (c) == decl)
{
if (is_doacross && (collapse == 1 || i >= collapse))
t = var;
else
{
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
gcc_assert (TREE_OPERAND (t, 0) == var);
t = TREE_OPERAND (t, 1);
gcc_assert (TREE_CODE (t) == PLUS_EXPR
|| TREE_CODE (t) == MINUS_EXPR
|| TREE_CODE (t) == POINTER_PLUS_EXPR);
gcc_assert (TREE_OPERAND (t, 0) == var);
t = build2 (TREE_CODE (t), TREE_TYPE (decl),
is_doacross ? var : decl,
TREE_OPERAND (t, 1));
}
gimple_seq *seq;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
seq = &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c);
else
seq = &OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c);
push_gimplify_context ();
gimplify_assign (decl, t, seq);
gimple *bind = NULL;
if (gimplify_ctxp->temps)
{
bind = gimple_build_bind (NULL_TREE, *seq, NULL_TREE);
*seq = NULL;
gimplify_seq_add_stmt (seq, bind);
}
pop_gimplify_context (bind);
}
}
}
BITMAP_FREE (has_decl_expr);
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP
|| (loop_p && orig_for_stmt == for_stmt))
{
push_gimplify_context ();
if (TREE_CODE (OMP_FOR_BODY (orig_for_stmt)) != BIND_EXPR)
{
OMP_FOR_BODY (orig_for_stmt)
= build3 (BIND_EXPR, void_type_node, NULL,
OMP_FOR_BODY (orig_for_stmt), NULL);
TREE_SIDE_EFFECTS (OMP_FOR_BODY (orig_for_stmt)) = 1;
}
}
gimple *g = gimplify_and_return_first (OMP_FOR_BODY (orig_for_stmt),
&for_body);
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP
|| (loop_p && orig_for_stmt == for_stmt))
{
if (gimple_code (g) == GIMPLE_BIND)
pop_gimplify_context (g);
else
pop_gimplify_context (NULL);
}
if (orig_for_stmt != for_stmt)
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
decl = TREE_OPERAND (t, 0);
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP)
gimplify_omp_ctxp = ctx->outer_context;
var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
gimplify_omp_ctxp = ctx;
omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
TREE_OPERAND (t, 0) = var;
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
TREE_OPERAND (t, 1) = copy_node (TREE_OPERAND (t, 1));
TREE_OPERAND (TREE_OPERAND (t, 1), 0) = var;
}
gimplify_adjust_omp_clauses (pre_p, for_body,
&OMP_FOR_CLAUSES (orig_for_stmt),
TREE_CODE (orig_for_stmt));
int kind;
switch (TREE_CODE (orig_for_stmt))
{
case OMP_FOR: kind = GF_OMP_FOR_KIND_FOR; break;
case OMP_SIMD: kind = GF_OMP_FOR_KIND_SIMD; break;
case OMP_DISTRIBUTE: kind = GF_OMP_FOR_KIND_DISTRIBUTE; break;
case OMP_TASKLOOP: kind = GF_OMP_FOR_KIND_TASKLOOP; break;
case OACC_LOOP: kind = GF_OMP_FOR_KIND_OACC_LOOP; break;
default:
gcc_unreachable ();
}
if (loop_p && kind == GF_OMP_FOR_KIND_SIMD)
{
gimplify_seq_add_seq (pre_p, for_pre_body);
for_pre_body = NULL;
}
gfor = gimple_build_omp_for (for_body, kind, OMP_FOR_CLAUSES (orig_for_stmt),
TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)),
for_pre_body);
if (orig_for_stmt != for_stmt)
gimple_omp_for_set_combined_p (gfor, true);
if (gimplify_omp_ctxp
&& (gimplify_omp_ctxp->combined_loop
|| (gimplify_omp_ctxp->region_type == ORT_COMBINED_PARALLEL
&& gimplify_omp_ctxp->outer_context
&& gimplify_omp_ctxp->outer_context->combined_loop)))
{
gimple_omp_for_set_combined_into_p (gfor, true);
if (gimplify_omp_ctxp->combined_loop)
gcc_assert (TREE_CODE (orig_for_stmt) == OMP_SIMD);
else
gcc_assert (TREE_CODE (orig_for_stmt) == OMP_FOR);
}
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
gimple_omp_for_set_index (gfor, i, TREE_OPERAND (t, 0));
gimple_omp_for_set_initial (gfor, i, TREE_OPERAND (t, 1));
t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
gimple_omp_for_set_cond (gfor, i, TREE_CODE (t));
gimple_omp_for_set_final (gfor, i, TREE_OPERAND (t, 1));
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
gimple_omp_for_set_incr (gfor, i, TREE_OPERAND (t, 1));
}
/* OMP_TASKLOOP is gimplified as two GIMPLE_OMP_FOR taskloop
constructs with GIMPLE_OMP_TASK sandwiched in between them.
The outer taskloop stands for computing the number of iterations,
counts for collapsed loops and holding taskloop specific clauses.
The task construct stands for the effect of data sharing on the
explicit task it creates and the inner taskloop stands for expansion
of the static loop inside of the explicit task construct. */
if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP)
{
tree *gfor_clauses_ptr = gimple_omp_for_clauses_ptr (gfor);
tree task_clauses = NULL_TREE;
tree c = *gfor_clauses_ptr;
tree *gtask_clauses_ptr = &task_clauses;
tree outer_for_clauses = NULL_TREE;
tree *gforo_clauses_ptr = &outer_for_clauses;
for (; c; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
/* These clauses are allowed on task, move them there. */
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
*gtask_clauses_ptr = c;
gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
break;
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_TASKLOOP_IV (c))
{
/* We want private on outer for and firstprivate
on task. */
*gtask_clauses_ptr
= build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (*gtask_clauses_ptr) = OMP_CLAUSE_DECL (c);
lang_hooks.decls.omp_finish_clause (*gtask_clauses_ptr, NULL);
gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
*gforo_clauses_ptr = c;
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
}
else
{
*gtask_clauses_ptr = c;
gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
}
break;
/* These clauses go into outer taskloop clauses. */
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_NOGROUP:
*gforo_clauses_ptr = c;
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
break;
/* Taskloop clause we duplicate on both taskloops. */
case OMP_CLAUSE_COLLAPSE:
*gfor_clauses_ptr = c;
gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
*gforo_clauses_ptr = copy_node (c);
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (*gforo_clauses_ptr);
break;
/* For lastprivate, keep the clause on inner taskloop, and add
a shared clause on task. If the same decl is also firstprivate,
add also firstprivate clause on the inner taskloop. */
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c))
{
/* For taskloop C++ lastprivate IVs, we want:
1) private on outer taskloop
2) firstprivate and shared on task
3) lastprivate on inner taskloop */
*gtask_clauses_ptr
= build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (*gtask_clauses_ptr) = OMP_CLAUSE_DECL (c);
lang_hooks.decls.omp_finish_clause (*gtask_clauses_ptr, NULL);
gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c) = 1;
*gforo_clauses_ptr = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_DECL (*gforo_clauses_ptr) = OMP_CLAUSE_DECL (c);
OMP_CLAUSE_PRIVATE_TASKLOOP_IV (*gforo_clauses_ptr) = 1;
TREE_TYPE (*gforo_clauses_ptr) = TREE_TYPE (c);
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (*gforo_clauses_ptr);
}
*gfor_clauses_ptr = c;
gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
*gtask_clauses_ptr
= build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_SHARED);
OMP_CLAUSE_DECL (*gtask_clauses_ptr) = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
OMP_CLAUSE_SHARED_FIRSTPRIVATE (*gtask_clauses_ptr) = 1;
gtask_clauses_ptr
= &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
break;
default:
gcc_unreachable ();
}
*gfor_clauses_ptr = NULL_TREE;
*gtask_clauses_ptr = NULL_TREE;
*gforo_clauses_ptr = NULL_TREE;
g = gimple_build_bind (NULL_TREE, gfor, NULL_TREE);
g = gimple_build_omp_task (g, task_clauses, NULL_TREE, NULL_TREE,
NULL_TREE, NULL_TREE, NULL_TREE);
gimple_omp_task_set_taskloop_p (g, true);
g = gimple_build_bind (NULL_TREE, g, NULL_TREE);
gomp_for *gforo
= gimple_build_omp_for (g, GF_OMP_FOR_KIND_TASKLOOP, outer_for_clauses,
gimple_omp_for_collapse (gfor),
gimple_omp_for_pre_body (gfor));
gimple_omp_for_set_pre_body (gfor, NULL);
gimple_omp_for_set_combined_p (gforo, true);
gimple_omp_for_set_combined_into_p (gfor, true);
for (i = 0; i < (int) gimple_omp_for_collapse (gfor); i++)
{
tree type = TREE_TYPE (gimple_omp_for_index (gfor, i));
tree v = create_tmp_var (type);
gimple_omp_for_set_index (gforo, i, v);
t = unshare_expr (gimple_omp_for_initial (gfor, i));
gimple_omp_for_set_initial (gforo, i, t);
gimple_omp_for_set_cond (gforo, i,
gimple_omp_for_cond (gfor, i));
t = unshare_expr (gimple_omp_for_final (gfor, i));
gimple_omp_for_set_final (gforo, i, t);
t = unshare_expr (gimple_omp_for_incr (gfor, i));
gcc_assert (TREE_OPERAND (t, 0) == gimple_omp_for_index (gfor, i));
TREE_OPERAND (t, 0) = v;
gimple_omp_for_set_incr (gforo, i, t);
t = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_DECL (t) = v;
OMP_CLAUSE_CHAIN (t) = gimple_omp_for_clauses (gforo);
gimple_omp_for_set_clauses (gforo, t);
}
gimplify_seq_add_stmt (pre_p, gforo);
}
else
gimplify_seq_add_stmt (pre_p, gfor);
if (TREE_CODE (orig_for_stmt) == OMP_FOR)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
unsigned lastprivate_conditional = 0;
while (ctx
&& (ctx->region_type == ORT_TARGET_DATA
|| ctx->region_type == ORT_TASKGROUP))
ctx = ctx->outer_context;
if (ctx && (ctx->region_type & ORT_PARALLEL) != 0)
for (tree c = gimple_omp_for_clauses (gfor);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
++lastprivate_conditional;
if (lastprivate_conditional)
{
struct omp_for_data fd;
omp_extract_for_data (gfor, &fd, NULL);
tree type = build_array_type_nelts (unsigned_type_for (fd.iter_type),
lastprivate_conditional);
tree var = create_tmp_var_raw (type);
tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__CONDTEMP_);
OMP_CLAUSE_DECL (c) = var;
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (gfor);
gimple_omp_for_set_clauses (gfor, c);
omp_add_variable (ctx, var, GOVD_CONDTEMP | GOVD_SEEN);
}
}
else if (TREE_CODE (orig_for_stmt) == OMP_SIMD)
{
unsigned lastprivate_conditional = 0;
for (tree c = gimple_omp_for_clauses (gfor); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
++lastprivate_conditional;
if (lastprivate_conditional)
{
struct omp_for_data fd;
omp_extract_for_data (gfor, &fd, NULL);
tree type = unsigned_type_for (fd.iter_type);
while (lastprivate_conditional--)
{
tree c = build_omp_clause (UNKNOWN_LOCATION,
OMP_CLAUSE__CONDTEMP_);
OMP_CLAUSE_DECL (c) = create_tmp_var (type);
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (gfor);
gimple_omp_for_set_clauses (gfor, c);
}
}
}
if (ret != GS_ALL_DONE)
return GS_ERROR;
*expr_p = NULL_TREE;
return GS_ALL_DONE;
}
/* Helper for gimplify_omp_loop, called through walk_tree.  DATA points
   at two reduction clauses; any occurrence of the first clause's
   placeholder decls in the walked tree is redirected to the second
   clause's corresponding placeholders.  */

static tree
replace_reduction_placeholders (tree *tp, int *walk_subtrees, void *data)
{
  /* Only decls can be placeholders; anything else is walked through.  */
  if (!DECL_P (*tp))
    return NULL_TREE;

  tree *clause_pair = (tree *) data;
  tree src_clause = clause_pair[0];
  tree dst_clause = clause_pair[1];

  if (*tp == OMP_CLAUSE_REDUCTION_PLACEHOLDER (src_clause))
    {
      *tp = OMP_CLAUSE_REDUCTION_PLACEHOLDER (dst_clause);
      *walk_subtrees = 0;
    }
  else if (*tp == OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (src_clause))
    {
      *tp = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (dst_clause);
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}
/* Gimplify the gross structure of an OMP_LOOP statement.  The generic
   "loop" construct has no GIMPLE representation of its own; this routine
   rewrites it into an OMP_SIMD, optionally wrapped in OMP_FOR and
   OMP_PARALLEL/OMP_DISTRIBUTE constructs depending on the bind clause,
   and then forwards the result to gimplify_omp_for.  */

static enum gimplify_status
gimplify_omp_loop (tree *expr_p, gimple_seq *pre_p)
{
  tree for_stmt = *expr_p;
  tree clauses = OMP_FOR_CLAUSES (for_stmt);
  struct gimplify_omp_ctx *octx = gimplify_omp_ctxp;
  enum omp_clause_bind_kind kind = OMP_CLAUSE_BIND_THREAD;
  int i;

  /* If order is not present, the behavior is as if order(concurrent)
     appeared.  */
  tree order = omp_find_clause (clauses, OMP_CLAUSE_ORDER);
  if (order == NULL_TREE)
    {
      order = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_ORDER);
      OMP_CLAUSE_CHAIN (order) = clauses;
      OMP_FOR_CLAUSES (for_stmt) = clauses = order;
    }

  /* Determine the binding.  Without an explicit bind clause it is
     inferred from the innermost enclosing OpenMP context; an explicit
     clause is instead validated against that context.  */
  tree bind = omp_find_clause (clauses, OMP_CLAUSE_BIND);
  if (bind == NULL_TREE)
    {
      if (!flag_openmp) /* flag_openmp_simd */
	;
      else if (octx && (octx->region_type & ORT_TEAMS) != 0)
	kind = OMP_CLAUSE_BIND_TEAMS;
      else if (octx && (octx->region_type & ORT_PARALLEL) != 0)
	kind = OMP_CLAUSE_BIND_PARALLEL;
      else
	{
	  /* Skip contexts that don't determine the binding (OpenACC,
	     none, implicit target) and require some enclosing OpenMP
	     construct, otherwise the bind clause is mandatory.  */
	  for (; octx; octx = octx->outer_context)
	    {
	      if ((octx->region_type & ORT_ACC) != 0
		  || octx->region_type == ORT_NONE
		  || octx->region_type == ORT_IMPLICIT_TARGET)
		continue;
	      break;
	    }
	  if (octx == NULL && !in_omp_construct)
	    error_at (EXPR_LOCATION (for_stmt),
		      "%<bind%> clause not specified on a %<loop%> "
		      "construct not nested inside another OpenMP construct");
	}
      /* Materialize the inferred binding as an explicit clause.  */
      bind = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_BIND);
      OMP_CLAUSE_CHAIN (bind) = clauses;
      OMP_CLAUSE_BIND_KIND (bind) = kind;
      OMP_FOR_CLAUSES (for_stmt) = bind;
    }
  else
    switch (OMP_CLAUSE_BIND_KIND (bind))
      {
      case OMP_CLAUSE_BIND_THREAD:
	break;
      case OMP_CLAUSE_BIND_PARALLEL:
	if (!flag_openmp) /* flag_openmp_simd */
	  {
	    OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
	    break;
	  }
	/* bind(parallel) is invalid inside a simd region (unless that
	   region is itself a bind-carrying loop construct).  */
	for (; octx; octx = octx->outer_context)
	  if (octx->region_type == ORT_SIMD
	      && omp_find_clause (octx->clauses, OMP_CLAUSE_BIND) == NULL_TREE)
	    {
	      error_at (EXPR_LOCATION (for_stmt),
			"%<bind(parallel)%> on a %<loop%> construct nested "
			"inside %<simd%> construct");
	      OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
	      break;
	    }
	kind = OMP_CLAUSE_BIND_PARALLEL;
	break;
      case OMP_CLAUSE_BIND_TEAMS:
	if (!flag_openmp) /* flag_openmp_simd */
	  {
	    OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
	    break;
	  }
	/* bind(teams) requires strict nesting inside a teams region.  */
	if ((octx
	     && octx->region_type != ORT_IMPLICIT_TARGET
	     && octx->region_type != ORT_NONE
	     && (octx->region_type & ORT_TEAMS) == 0)
	    || in_omp_construct)
	  {
	    error_at (EXPR_LOCATION (for_stmt),
		      "%<bind(teams)%> on a %<loop%> region not strictly "
		      "nested inside of a %<teams%> region");
	    OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
	    break;
	  }
	kind = OMP_CLAUSE_BIND_TEAMS;
	break;
      default:
	gcc_unreachable ();
      }

  /* Diagnose clause restrictions specific to the loop construct:
     no inscan or task reduction modifiers, and lastprivate only on
     the loop iterators (original decls included).  Offending
     lastprivate clauses are removed from the chain.  */
  for (tree *pc = &OMP_FOR_CLAUSES (for_stmt); *pc; )
    switch (OMP_CLAUSE_CODE (*pc))
      {
      case OMP_CLAUSE_REDUCTION:
	if (OMP_CLAUSE_REDUCTION_INSCAN (*pc))
	  {
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<inscan%> %<reduction%> clause on "
		      "%qs construct", "loop");
	    OMP_CLAUSE_REDUCTION_INSCAN (*pc) = 0;
	  }
	if (OMP_CLAUSE_REDUCTION_TASK (*pc))
	  {
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "invalid %<task%> reduction modifier on construct "
		      "other than %<parallel%>, %<for%> or %<sections%>");
	    OMP_CLAUSE_REDUCTION_TASK (*pc) = 0;
	  }
	pc = &OMP_CLAUSE_CHAIN (*pc);
	break;
      case OMP_CLAUSE_LASTPRIVATE:
	for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
	  {
	    tree t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
	    gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
	    if (OMP_CLAUSE_DECL (*pc) == TREE_OPERAND (t, 0))
	      break;
	    /* Also accept the original (pre-privatization) iterator
	       decl recorded in OMP_FOR_ORIG_DECLS.  */
	    if (OMP_FOR_ORIG_DECLS (for_stmt)
		&& TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt),
					    i)) == TREE_LIST
		&& TREE_PURPOSE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt),
					       i)))
	      {
		tree orig = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i);
		if (OMP_CLAUSE_DECL (*pc) == TREE_PURPOSE (orig))
		  break;
	      }
	  }
	if (i == TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)))
	  {
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<lastprivate%> clause on a %<loop%> construct refers "
		      "to a variable %qD which is not the loop iterator",
		      OMP_CLAUSE_DECL (*pc));
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    break;
	  }
	pc = &OMP_CLAUSE_CHAIN (*pc);
	break;
      default:
	pc = &OMP_CLAUSE_CHAIN (*pc);
	break;
      }

  /* The innermost construct the loop is lowered to is always a simd.  */
  TREE_SET_CODE (for_stmt, OMP_SIMD);

  int last;
  switch (kind)
    {
    case OMP_CLAUSE_BIND_THREAD: last = 0; break;
    case OMP_CLAUSE_BIND_PARALLEL: last = 1; break;
    case OMP_CLAUSE_BIND_TEAMS: last = 2; break;
    }
  /* Wrap the simd in enclosing constructs according to the binding:
     pass 1 adds an OMP_FOR, pass 2 adds OMP_PARALLEL + OMP_DISTRIBUTE.
     bind(thread) thus adds nothing, bind(parallel) a worksharing for,
     bind(teams) the full distribute parallel for nest.  */
  for (int pass = 1; pass <= last; pass++)
    {
      if (pass == 2)
	{
	  /* Build the combined OMP_PARALLEL around what we have so far.  */
	  tree bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
	  append_to_statement_list (*expr_p, &BIND_EXPR_BODY (bind));
	  *expr_p = make_node (OMP_PARALLEL);
	  TREE_TYPE (*expr_p) = void_type_node;
	  OMP_PARALLEL_BODY (*expr_p) = bind;
	  OMP_PARALLEL_COMBINED (*expr_p) = 1;
	  SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (for_stmt));
	  tree *pc = &OMP_PARALLEL_CLAUSES (*expr_p);
	  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
	    if (OMP_FOR_ORIG_DECLS (for_stmt)
		&& (TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i))
		    == TREE_LIST))
	      {
		tree elt = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i);
		if (TREE_PURPOSE (elt) && TREE_VALUE (elt))
		  {
		    *pc = build_omp_clause (UNKNOWN_LOCATION,
					    OMP_CLAUSE_FIRSTPRIVATE);
		    OMP_CLAUSE_DECL (*pc) = TREE_VALUE (elt);
		    pc = &OMP_CLAUSE_CHAIN (*pc);
		  }
	      }
	}
      tree t = make_node (pass == 2 ? OMP_DISTRIBUTE : OMP_FOR);
      tree *pc = &OMP_FOR_CLAUSES (t);
      TREE_TYPE (t) = void_type_node;
      OMP_FOR_BODY (t) = *expr_p;
      SET_EXPR_LOCATION (t, EXPR_LOCATION (for_stmt));
      /* Distribute the original loop clauses over the new wrapper.  */
      for (tree c = OMP_FOR_CLAUSES (for_stmt); c; c = OMP_CLAUSE_CHAIN (c))
	switch (OMP_CLAUSE_CODE (c))
	  {
	  case OMP_CLAUSE_BIND:
	  case OMP_CLAUSE_ORDER:
	  case OMP_CLAUSE_COLLAPSE:
	    *pc = copy_node (c);
	    pc = &OMP_CLAUSE_CHAIN (*pc);
	    break;
	  case OMP_CLAUSE_PRIVATE:
	  case OMP_CLAUSE_FIRSTPRIVATE:
	    /* Only needed on innermost.  */
	    break;
	  case OMP_CLAUSE_LASTPRIVATE:
	    /* Iterator lastprivates become firstprivate + lastprivate
	       pairs on the inner wrappers so the start value flows in.  */
	    if (OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c) && pass != last)
	      {
		*pc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
					OMP_CLAUSE_FIRSTPRIVATE);
		OMP_CLAUSE_DECL (*pc) = OMP_CLAUSE_DECL (c);
		lang_hooks.decls.omp_finish_clause (*pc, NULL);
		pc = &OMP_CLAUSE_CHAIN (*pc);
	      }
	    *pc = copy_node (c);
	    OMP_CLAUSE_LASTPRIVATE_STMT (*pc) = NULL_TREE;
	    TREE_TYPE (*pc) = unshare_expr (TREE_TYPE (c));
	    if (OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c))
	      {
		if (pass != last)
		  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (*pc) = 1;
		else
		  lang_hooks.decls.omp_finish_clause (*pc, NULL);
		OMP_CLAUSE_LASTPRIVATE_LOOP_IV (*pc) = 0;
	      }
	    pc = &OMP_CLAUSE_CHAIN (*pc);
	    break;
	  case OMP_CLAUSE_REDUCTION:
	    /* Reductions are replicated with unshared operands; any
	       placeholder decls in the init/merge sequences must be
	       redirected to the copies.  */
	    *pc = copy_node (c);
	    OMP_CLAUSE_DECL (*pc) = unshare_expr (OMP_CLAUSE_DECL (c));
	    TREE_TYPE (*pc) = unshare_expr (TREE_TYPE (c));
	    OMP_CLAUSE_REDUCTION_INIT (*pc)
	      = unshare_expr (OMP_CLAUSE_REDUCTION_INIT (c));
	    OMP_CLAUSE_REDUCTION_MERGE (*pc)
	      = unshare_expr (OMP_CLAUSE_REDUCTION_MERGE (c));
	    if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (*pc))
	      {
		OMP_CLAUSE_REDUCTION_PLACEHOLDER (*pc)
		  = copy_node (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c));
		if (OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (*pc))
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (*pc)
		    = copy_node (OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c));
		tree nc = *pc;
		tree data[2] = { c, nc };
		walk_tree_without_duplicates (&OMP_CLAUSE_REDUCTION_INIT (nc),
					      replace_reduction_placeholders,
					      data);
		walk_tree_without_duplicates (&OMP_CLAUSE_REDUCTION_MERGE (nc),
					      replace_reduction_placeholders,
					      data);
	      }
	    pc = &OMP_CLAUSE_CHAIN (*pc);
	    break;
	  default:
	    gcc_unreachable ();
	  }
      *pc = NULL_TREE;
      *expr_p = t;
    }
  return gimplify_omp_for (expr_p, pre_p);
}
/* Helper function of optimize_target_teams, find OMP_TEAMS inside
   of OMP_TARGET's body.  Returns the OMP_TEAMS tree when found,
   NULL_TREE otherwise.  */

static tree
find_omp_teams (tree *tp, int *walk_subtrees, void *)
{
  enum tree_code code = TREE_CODE (*tp);

  if (code == OMP_TEAMS)
    {
      *walk_subtrees = 0;
      return *tp;
    }
  /* Descend only into containers that can directly wrap the teams
     statement; everything else terminates the walk at this node.  */
  *walk_subtrees = (code == BIND_EXPR || code == STATEMENT_LIST) ? 1 : 0;
  return NULL_TREE;
}
/* Helper function of optimize_target_teams, determine if the expression
   can be computed safely before the target construct on the host.
   walk_tree callback: returns the offending subtree (non-NULL) if the
   expression is NOT host-computable, NULL_TREE if this node is fine.  */

static tree
computable_teams_clause (tree *tp, int *walk_subtrees, void *)
{
  splay_tree_node n;

  /* Types themselves carry no runtime computation.  */
  if (TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }
  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      *walk_subtrees = 0;
      /* Reject decls whose host value can differ from the device value
	 or that cannot be read safely before entering the region.  */
      if (error_operand_p (*tp)
	  || !INTEGRAL_TYPE_P (TREE_TYPE (*tp))
	  || DECL_HAS_VALUE_EXPR_P (*tp)
	  || DECL_THREAD_LOCAL_P (*tp)
	  || TREE_SIDE_EFFECTS (*tp)
	  || TREE_THIS_VOLATILE (*tp))
	return *tp;
      /* "omp declare target" globals live on the device side.  */
      if (is_global_var (*tp)
	  && (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (*tp))
	      || lookup_attribute ("omp declare target link",
				   DECL_ATTRIBUTES (*tp))))
	return *tp;
      /* A local not yet seen in any BIND_EXPR is not materialized on
	 the host at this point.  */
      if (VAR_P (*tp)
	  && !DECL_SEEN_IN_BIND_EXPR_P (*tp)
	  && !is_global_var (*tp)
	  && decl_function_context (*tp) == current_function_decl)
	return *tp;
      /* Otherwise consult the data-sharing info of the target context:
	 firstprivate (explicit, or implicit via defaultmap) and
	 map(always,to:)/map(always,tofrom:) decls have a well-defined
	 host value; anything else is rejected.  */
      n = splay_tree_lookup (gimplify_omp_ctxp->variables,
			     (splay_tree_key) *tp);
      if (n == NULL)
	{
	  if (gimplify_omp_ctxp->defaultmap[GDMK_SCALAR] & GOVD_FIRSTPRIVATE)
	    return NULL_TREE;
	  return *tp;
	}
      else if (n->value & GOVD_LOCAL)
	return *tp;
      else if (n->value & GOVD_FIRSTPRIVATE)
	return NULL_TREE;
      else if ((n->value & (GOVD_MAP | GOVD_MAP_ALWAYS_TO))
	       == (GOVD_MAP | GOVD_MAP_ALWAYS_TO))
	return NULL_TREE;
      return *tp;
    case INTEGER_CST:
      if (!INTEGRAL_TYPE_P (TREE_TYPE (*tp)))
	return *tp;
      return NULL_TREE;
    case TARGET_EXPR:
      /* Only an initializer-less TARGET_EXPR whose slot is a plain VAR
	 is acceptable, and then only if the slot itself is.  */
      if (TARGET_EXPR_INITIAL (*tp)
	  || TREE_CODE (TARGET_EXPR_SLOT (*tp)) != VAR_DECL)
	return *tp;
      return computable_teams_clause (&TARGET_EXPR_SLOT (*tp),
				      walk_subtrees, NULL);
    /* Allow some reasonable subset of integral arithmetics.  */
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case MIN_EXPR:
    case MAX_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
    case NEGATE_EXPR:
    case ABS_EXPR:
    case BIT_NOT_EXPR:
    case NON_LVALUE_EXPR:
    CASE_CONVERT:
      if (!INTEGRAL_TYPE_P (TREE_TYPE (*tp)))
	return *tp;
      return NULL_TREE;
    /* And disallow anything else, except for comparisons.  */
    default:
      if (COMPARISON_CLASS_P (*tp))
	return NULL_TREE;
      return *tp;
    }
}
/* Try to determine if the num_teams and/or thread_limit expressions
   can have their values determined already before entering the
   target construct.
   INTEGER_CSTs trivially are,
   integral decls that are firstprivate (explicitly or implicitly)
   or explicitly map(always, to:) or map(always, tofrom:) on the target
   region too, and expressions involving simple arithmetics on those
   too, function calls are not ok, dereferencing something neither etc.
   Add NUM_TEAMS and THREAD_LIMIT clauses to the OMP_CLAUSES of
   EXPR based on what we find:
   0 stands for clause not specified at all, use implementation default
   -1 stands for value that can't be determined easily before entering
   the target construct.
   If teams construct is not present at all, use 1 for num_teams
   and 0 for thread_limit (only one team is involved, and the thread
   limit is implementation defined).  */

static void
optimize_target_teams (tree target, gimple_seq *pre_p)
{
  tree body = OMP_BODY (target);
  tree teams = walk_tree (&body, find_omp_teams, NULL, NULL);
  /* Defaults: 0 == clause not specified at all.  */
  tree num_teams = integer_zero_node;
  tree thread_limit = integer_zero_node;
  location_t num_teams_loc = EXPR_LOCATION (target);
  location_t thread_limit_loc = EXPR_LOCATION (target);
  tree c, *p, expr;
  struct gimplify_omp_ctx *target_ctx = gimplify_omp_ctxp;

  if (teams == NULL_TREE)
    num_teams = integer_one_node;
  else
    for (c = OMP_TEAMS_CLAUSES (teams); c; c = OMP_CLAUSE_CHAIN (c))
      {
	/* P points at whichever of the two results this clause feeds.  */
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS)
	  {
	    p = &num_teams;
	    num_teams_loc = OMP_CLAUSE_LOCATION (c);
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT)
	  {
	    p = &thread_limit;
	    thread_limit_loc = OMP_CLAUSE_LOCATION (c);
	  }
	else
	  continue;
	expr = OMP_CLAUSE_OPERAND (c, 0);
	if (TREE_CODE (expr) == INTEGER_CST)
	  {
	    *p = expr;
	    continue;
	  }
	/* Non-NULL walk result means some subtree is not computable on
	   the host before the region; record -1.  */
	if (walk_tree (&expr, computable_teams_clause, NULL, NULL))
	  {
	    *p = integer_minus_one_node;
	    continue;
	  }
	*p = expr;
	/* Gimplify in the context enclosing the target region, so the
	   value is evaluated on the host before entering it; restore
	   the target context on every path afterwards.  */
	gimplify_omp_ctxp = gimplify_omp_ctxp->outer_context;
	if (gimplify_expr (p, pre_p, NULL, is_gimple_val, fb_rvalue, false)
	    == GS_ERROR)
	  {
	    gimplify_omp_ctxp = target_ctx;
	    *p = integer_minus_one_node;
	    continue;
	  }
	gimplify_omp_ctxp = target_ctx;
	if (!DECL_P (expr) && TREE_CODE (expr) != TARGET_EXPR)
	  OMP_CLAUSE_OPERAND (c, 0) = *p;
      }
  /* Attach the computed values as clauses on the target itself.  */
  c = build_omp_clause (thread_limit_loc, OMP_CLAUSE_THREAD_LIMIT);
  OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = thread_limit;
  OMP_CLAUSE_CHAIN (c) = OMP_TARGET_CLAUSES (target);
  OMP_TARGET_CLAUSES (target) = c;
  c = build_omp_clause (num_teams_loc, OMP_CLAUSE_NUM_TEAMS);
  OMP_CLAUSE_NUM_TEAMS_EXPR (c) = num_teams;
  OMP_CLAUSE_CHAIN (c) = OMP_TARGET_CLAUSES (target);
  OMP_TARGET_CLAUSES (target) = c;
}
/* Gimplify the gross structure of several OMP constructs: sections, single,
   target (and data variants), teams, and the OpenACC compute/data
   constructs.  Scans and adjusts the clauses, gimplifies the body, and
   emits the corresponding GIMPLE statement into *PRE_P.  */
static void
gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gimple *stmt;
  gimple_seq body = NULL;
  enum omp_region_type ort;
  /* Map the tree code onto the OMP region type used for clause scanning.  */
  switch (TREE_CODE (expr))
    {
    case OMP_SECTIONS:
    case OMP_SINGLE:
      ort = ORT_WORKSHARE;
      break;
    case OMP_TARGET:
      ort = OMP_TARGET_COMBINED (expr) ? ORT_COMBINED_TARGET : ORT_TARGET;
      break;
    case OACC_KERNELS:
      ort = ORT_ACC_KERNELS;
      break;
    case OACC_PARALLEL:
      ort = ORT_ACC_PARALLEL;
      break;
    case OACC_SERIAL:
      ort = ORT_ACC_SERIAL;
      break;
    case OACC_DATA:
      ort = ORT_ACC_DATA;
      break;
    case OMP_TARGET_DATA:
      ort = ORT_TARGET_DATA;
      break;
    case OMP_TEAMS:
      ort = OMP_TEAMS_COMBINED (expr) ? ORT_COMBINED_TEAMS : ORT_TEAMS;
      /* With no enclosing context (or only an implicit target), this is
	 a host teams construct.  */
      if (gimplify_omp_ctxp == NULL
	  || gimplify_omp_ctxp->region_type == ORT_IMPLICIT_TARGET)
	ort = (enum omp_region_type) (ort | ORT_HOST_TEAMS);
      break;
    case OACC_HOST_DATA:
      ort = ORT_ACC_HOST_DATA;
      break;
    default:
      gcc_unreachable ();
    }
  bool save_in_omp_construct = in_omp_construct;
  if ((ort & ORT_ACC) == 0)
    in_omp_construct = false;
  gimplify_scan_omp_clauses (&OMP_CLAUSES (expr), pre_p, ort,
			     TREE_CODE (expr));
  if (TREE_CODE (expr) == OMP_TARGET)
    optimize_target_teams (expr, pre_p);
  if ((ort & (ORT_TARGET | ORT_TARGET_DATA)) != 0
      || (ort & ORT_HOST_TEAMS) == ORT_HOST_TEAMS)
    {
      /* Target/data/host-teams bodies are gimplified in their own
	 gimplify context.  */
      push_gimplify_context ();
      gimple *g = gimplify_and_return_first (OMP_BODY (expr), &body);
      if (gimple_code (g) == GIMPLE_BIND)
	pop_gimplify_context (g);
      else
	pop_gimplify_context (NULL);
      if ((ort & ORT_TARGET_DATA) != 0)
	{
	  /* Data regions need a runtime call at region exit; wrap the
	     body in a GIMPLE_TRY_FINALLY whose cleanup invokes the
	     matching end-of-data builtin.  */
	  enum built_in_function end_ix;
	  switch (TREE_CODE (expr))
	    {
	    case OACC_DATA:
	    case OACC_HOST_DATA:
	      end_ix = BUILT_IN_GOACC_DATA_END;
	      break;
	    case OMP_TARGET_DATA:
	      end_ix = BUILT_IN_GOMP_TARGET_END_DATA;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  tree fn = builtin_decl_explicit (end_ix);
	  g = gimple_build_call (fn, 0);
	  gimple_seq cleanup = NULL;
	  gimple_seq_add_stmt (&cleanup, g);
	  g = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
	  body = NULL;
	  gimple_seq_add_stmt (&body, g);
	}
    }
  else
    gimplify_and_add (OMP_BODY (expr), &body);
  gimplify_adjust_omp_clauses (pre_p, body, &OMP_CLAUSES (expr),
			       TREE_CODE (expr));
  in_omp_construct = save_in_omp_construct;
  /* Build the GIMPLE statement for this construct.  */
  switch (TREE_CODE (expr))
    {
    case OACC_DATA:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_DATA,
				      OMP_CLAUSES (expr));
      break;
    case OACC_HOST_DATA:
      if (omp_find_clause (OMP_CLAUSES (expr), OMP_CLAUSE_IF_PRESENT))
	{
	  /* Propagate if_present onto all use_device_ptr clauses.  */
	  for (tree c = OMP_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
	    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
	      OMP_CLAUSE_USE_DEVICE_PTR_IF_PRESENT (c) = 1;
	}
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_HOST_DATA,
				      OMP_CLAUSES (expr));
      break;
    case OACC_KERNELS:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_KERNELS,
				      OMP_CLAUSES (expr));
      break;
    case OACC_PARALLEL:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_PARALLEL,
				      OMP_CLAUSES (expr));
      break;
    case OACC_SERIAL:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_SERIAL,
				      OMP_CLAUSES (expr));
      break;
    case OMP_SECTIONS:
      stmt = gimple_build_omp_sections (body, OMP_CLAUSES (expr));
      break;
    case OMP_SINGLE:
      stmt = gimple_build_omp_single (body, OMP_CLAUSES (expr));
      break;
    case OMP_TARGET:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_REGION,
				      OMP_CLAUSES (expr));
      break;
    case OMP_TARGET_DATA:
      /* Put use_device_{ptr,addr} clauses last, as map clauses are supposed
	 to be evaluated before the use_device_{ptr,addr} clauses if they
	 refer to the same variables.  */
      {
	tree use_device_clauses;
	tree *pc, *uc = &use_device_clauses;
	/* Unlink the use_device clauses from the main chain into a
	   separate list, preserving relative order.  */
	for (pc = &OMP_CLAUSES (expr); *pc; )
	  if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_USE_DEVICE_PTR
	      || OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_USE_DEVICE_ADDR)
	    {
	      *uc = *pc;
	      *pc = OMP_CLAUSE_CHAIN (*pc);
	      uc = &OMP_CLAUSE_CHAIN (*uc);
	    }
	  else
	    pc = &OMP_CLAUSE_CHAIN (*pc);
	/* Terminate the separated list and append it to the tail.  */
	*uc = NULL_TREE;
	*pc = use_device_clauses;
	stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_DATA,
					OMP_CLAUSES (expr));
      }
      break;
    case OMP_TEAMS:
      stmt = gimple_build_omp_teams (body, OMP_CLAUSES (expr));
      if ((ort & ORT_HOST_TEAMS) == ORT_HOST_TEAMS)
	gimple_omp_teams_set_host (as_a <gomp_teams *> (stmt), true);
      break;
    default:
      gcc_unreachable ();
    }
  gimplify_seq_add_stmt (pre_p, stmt);
  *expr_p = NULL_TREE;
}
/* Gimplify the gross structure of OpenACC enter/exit data, update, and OpenMP
   target update constructs.  These are standalone directives (no body);
   the result is a GIMPLE_OMP_TARGET with the appropriate subcode, added
   to *PRE_P.  */
static void
gimplify_omp_target_update (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  int kind;
  gomp_target *stmt;
  enum omp_region_type ort = ORT_WORKSHARE;
  /* Pick the GF_OMP_TARGET_KIND_* subcode; OpenACC variants also switch
     the region type used for clause scanning.  */
  switch (TREE_CODE (expr))
    {
    case OACC_ENTER_DATA:
    case OACC_EXIT_DATA:
      kind = GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA;
      ort = ORT_ACC;
      break;
    case OACC_UPDATE:
      kind = GF_OMP_TARGET_KIND_OACC_UPDATE;
      ort = ORT_ACC;
      break;
    case OMP_TARGET_UPDATE:
      kind = GF_OMP_TARGET_KIND_UPDATE;
      break;
    case OMP_TARGET_ENTER_DATA:
      kind = GF_OMP_TARGET_KIND_ENTER_DATA;
      break;
    case OMP_TARGET_EXIT_DATA:
      kind = GF_OMP_TARGET_KIND_EXIT_DATA;
      break;
    default:
      gcc_unreachable ();
    }
  gimplify_scan_omp_clauses (&OMP_STANDALONE_CLAUSES (expr), pre_p,
			     ort, TREE_CODE (expr));
  gimplify_adjust_omp_clauses (pre_p, NULL, &OMP_STANDALONE_CLAUSES (expr),
			       TREE_CODE (expr));
  if (TREE_CODE (expr) == OACC_UPDATE
      && omp_find_clause (OMP_STANDALONE_CLAUSES (expr),
			  OMP_CLAUSE_IF_PRESENT))
    {
      /* The runtime uses GOMP_MAP_{TO,FROM} to denote the if_present
	 clause.  */
      for (tree c = OMP_STANDALONE_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP)
	  switch (OMP_CLAUSE_MAP_KIND (c))
	    {
	    case GOMP_MAP_FORCE_TO:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_TO);
	      break;
	    case GOMP_MAP_FORCE_FROM:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_FROM);
	      break;
	    default:
	      break;
	    }
    }
  else if (TREE_CODE (expr) == OACC_EXIT_DATA
	   && omp_find_clause (OMP_STANDALONE_CLAUSES (expr),
			       OMP_CLAUSE_FINALIZE))
    {
      /* Use GOMP_MAP_DELETE/GOMP_MAP_FORCE_FROM to denote "finalize"
	 semantics.  */
      bool have_clause = false;
      for (tree c = OMP_STANDALONE_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP)
	  switch (OMP_CLAUSE_MAP_KIND (c))
	    {
	    case GOMP_MAP_FROM:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_FORCE_FROM);
	      have_clause = true;
	      break;
	    case GOMP_MAP_RELEASE:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_DELETE);
	      have_clause = true;
	      break;
	    case GOMP_MAP_POINTER:
	    case GOMP_MAP_TO_PSET:
	      /* TODO PR92929: we may see these here, but they'll always follow
		 one of the clauses above, and will be handled by libgomp as
		 one group, so no handling required here.  */
	      gcc_assert (have_clause);
	      break;
	    case GOMP_MAP_DETACH:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_FORCE_DETACH);
	      have_clause = false;
	      break;
	    case GOMP_MAP_STRUCT:
	      have_clause = false;
	      break;
	    default:
	      gcc_unreachable ();
	    }
    }
  /* NULL body: these constructs are standalone directives.  */
  stmt = gimple_build_omp_target (NULL, kind, OMP_STANDALONE_CLAUSES (expr));
  gimplify_seq_add_stmt (pre_p, stmt);
  *expr_p = NULL_TREE;
}
/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form.  */
static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  STRIP_USELESS_TYPE_CONVERSION (expr);
  if (TREE_CODE (expr) != INDIRECT_REF)
    /* Without a dereference, EXPR matches only if it is exactly what
       ADDR takes the address of.  */
    return TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0);
  /* EXPR is *P; peel matching conversion wrappers off P and ADDR in
     lockstep until they meet or diverge.  */
  tree inner = TREE_OPERAND (expr, 0);
  while (inner != addr
	 && (CONVERT_EXPR_P (inner)
	     || TREE_CODE (inner) == NON_LVALUE_EXPR)
	 && TREE_CODE (inner) == TREE_CODE (addr)
	 && types_compatible_p (TREE_TYPE (inner), TREE_TYPE (addr)))
    {
      inner = TREE_OPERAND (inner, 0);
      addr = TREE_OPERAND (addr, 0);
    }
  if (inner == addr)
    return true;
  /* &X on both sides referring to the same object also matches.  */
  return (TREE_CODE (addr) == ADDR_EXPR
	  && TREE_CODE (inner) == ADDR_EXPR
	  && TREE_OPERAND (addr, 0) == TREE_OPERAND (inner, 0));
}
/* Walk *EXPR_P and replace appearances of *LHS_ADDR with LHS_VAR.  If an
   expression does not involve the lhs, evaluate it into a temporary.
   Return 1 if the lhs appeared as a subexpression, 0 if it did not,
   or -1 if an error was encountered.  */
static int
goa_stabilize_expr (tree *expr_p, gimple_seq *pre_p, tree lhs_addr,
		    tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;
  /* The whole expression is the stabilized lhs: substitute the variable.  */
  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  /* Already a gimple value, nothing to stabilize.  */
  if (is_gimple_val (expr))
    return 0;
  saw_lhs = 0;
  /* Recurse into operands per tree-code class; the fallthroughs let the
     binary/comparison cases share the operand-0 recursion with unary.  */
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
    case tcc_comparison:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p, lhs_addr,
				     lhs_var);
      /* FALLTHRU */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p, lhs_addr,
				     lhs_var);
      break;
    case tcc_expression:
      switch (TREE_CODE (expr))
	{
	case TRUTH_ANDIF_EXPR:
	case TRUTH_ORIF_EXPR:
	case TRUTH_AND_EXPR:
	case TRUTH_OR_EXPR:
	case TRUTH_XOR_EXPR:
	case BIT_INSERT_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
					 lhs_addr, lhs_var);
	  /* FALLTHRU */
	case TRUTH_NOT_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
					 lhs_addr, lhs_var);
	  break;
	case COMPOUND_EXPR:
	  /* Break out any preevaluations from cp_build_modify_expr.  */
	  for (; TREE_CODE (expr) == COMPOUND_EXPR;
	       expr = TREE_OPERAND (expr, 1))
	    gimplify_stmt (&TREE_OPERAND (expr, 0), pre_p);
	  *expr_p = expr;
	  /* Retry on the last operand of the COMPOUND_EXPR chain.  */
	  return goa_stabilize_expr (expr_p, pre_p, lhs_addr, lhs_var);
	default:
	  break;
	}
      break;
    case tcc_reference:
      if (TREE_CODE (expr) == BIT_FIELD_REF)
	saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
				       lhs_addr, lhs_var);
      break;
    default:
      break;
    }
  /* The lhs did not appear anywhere: evaluate the expression into a
     temporary; report -1 on gimplification failure.  */
  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }
  return saw_lhs;
}
/* Gimplify an OMP_ATOMIC statement (including the READ/CAPTURE_OLD/
   CAPTURE_NEW variants).  Emits a GIMPLE_OMP_ATOMIC_LOAD/STORE pair into
   *PRE_P; for the capture forms, *EXPR_P is replaced by the captured
   value.  Returns GS_ALL_DONE on success, GS_ERROR otherwise.  */
static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, gimple_seq *pre_p)
{
  tree addr = TREE_OPERAND (*expr_p, 0);
  /* OMP_ATOMIC_READ has no rhs; all other forms carry one in operand 1.  */
  tree rhs = TREE_CODE (*expr_p) == OMP_ATOMIC_READ
	     ? NULL : TREE_OPERAND (*expr_p, 1);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  tree tmp_load;
  gomp_atomic_load *loadstmt;
  gomp_atomic_store *storestmt;
  /* Temporary receiving the atomically loaded value.  */
  tmp_load = create_tmp_reg (type);
  /* Replace occurrences of the lhs in RHS with TMP_LOAD; -1 means a
     gimplification error occurred.  */
  if (rhs && goa_stabilize_expr (&rhs, pre_p, addr, tmp_load) < 0)
    return GS_ERROR;
  if (gimplify_expr (&addr, pre_p, NULL, is_gimple_val, fb_rvalue)
      != GS_ALL_DONE)
    return GS_ERROR;
  loadstmt = gimple_build_omp_atomic_load (tmp_load, addr,
					   OMP_ATOMIC_MEMORY_ORDER (*expr_p));
  gimplify_seq_add_stmt (pre_p, loadstmt);
  if (rhs)
    {
      /* BIT_INSERT_EXPR is not valid for non-integral bitfield
	 representatives.  Use BIT_FIELD_REF on the lhs instead.  */
      if (TREE_CODE (rhs) == BIT_INSERT_EXPR
	  && !INTEGRAL_TYPE_P (TREE_TYPE (tmp_load)))
	{
	  tree bitpos = TREE_OPERAND (rhs, 2);
	  tree op1 = TREE_OPERAND (rhs, 1);
	  tree bitsize;
	  tree tmp_store = tmp_load;
	  /* For capture-old the loaded value must survive unmodified, so
	     store into a copy.  */
	  if (TREE_CODE (*expr_p) == OMP_ATOMIC_CAPTURE_OLD)
	    tmp_store = get_initialized_tmp_var (tmp_load, pre_p);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (op1)))
	    bitsize = bitsize_int (TYPE_PRECISION (TREE_TYPE (op1)));
	  else
	    bitsize = TYPE_SIZE (TREE_TYPE (op1));
	  gcc_assert (TREE_OPERAND (rhs, 0) == tmp_load);
	  /* Assign OP1 into the selected bit range of TMP_STORE.  */
	  tree t = build2_loc (EXPR_LOCATION (rhs),
			       MODIFY_EXPR, void_type_node,
			       build3_loc (EXPR_LOCATION (rhs), BIT_FIELD_REF,
					   TREE_TYPE (op1), tmp_store, bitsize,
					   bitpos), op1);
	  gimplify_and_add (t, pre_p);
	  rhs = tmp_store;
	}
      if (gimplify_expr (&rhs, pre_p, NULL, is_gimple_val, fb_rvalue)
	  != GS_ALL_DONE)
	return GS_ERROR;
    }
  /* An atomic read stores the loaded value back unchanged.  */
  if (TREE_CODE (*expr_p) == OMP_ATOMIC_READ)
    rhs = tmp_load;
  storestmt
    = gimple_build_omp_atomic_store (rhs, OMP_ATOMIC_MEMORY_ORDER (*expr_p));
  gimplify_seq_add_stmt (pre_p, storestmt);
  /* For the capture forms the expression's value is the old (loaded) or
     new (stored) value respectively.  */
  switch (TREE_CODE (*expr_p))
    {
    case OMP_ATOMIC_READ:
    case OMP_ATOMIC_CAPTURE_OLD:
      *expr_p = tmp_load;
      gimple_omp_atomic_set_need_value (loadstmt);
      break;
    case OMP_ATOMIC_CAPTURE_NEW:
      *expr_p = rhs;
      gimple_omp_atomic_set_need_value (storestmt);
      break;
    default:
      *expr_p = NULL;
      break;
    }
  return GS_ALL_DONE;
}
/* Gimplify a TRANSACTION_EXPR.  This involves gimplification of the
   body, and adding some EH bits.  */
static enum gimplify_status
gimplify_transaction (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  tree tbody = TRANSACTION_EXPR_BODY (expr);
  gimple_seq body = NULL;
  /* Wrap the transaction body in a BIND_EXPR so we have a context
     where to put decls for OMP.  */
  if (TREE_CODE (tbody) != BIND_EXPR)
    {
      tree bind = build3 (BIND_EXPR, void_type_node, NULL, tbody, NULL);
      TREE_SIDE_EFFECTS (bind) = 1;
      SET_EXPR_LOCATION (bind, EXPR_LOCATION (tbody));
      TRANSACTION_EXPR_BODY (expr) = bind;
    }
  /* Gimplify the (possibly voidified) body in its own context.  */
  push_gimplify_context ();
  tree temp = voidify_wrapper_expr (*expr_p, NULL);
  gimple *body_stmt
    = gimplify_and_return_first (TRANSACTION_EXPR_BODY (expr), &body);
  pop_gimplify_context (body_stmt);
  gtransaction *trans_stmt = gimple_build_transaction (body);
  /* Record the outer/relaxed flavor of the transaction, if any.  */
  int subcode = 0;
  if (TRANSACTION_EXPR_OUTER (expr))
    subcode = GTMA_IS_OUTER;
  else if (TRANSACTION_EXPR_RELAXED (expr))
    subcode = GTMA_IS_RELAXED;
  gimple_transaction_set_subcode (trans_stmt, subcode);
  gimplify_seq_add_stmt (pre_p, trans_stmt);
  /* If voidification produced a temporary, it is the expression's value
     and still needs gimplification.  */
  if (temp == NULL_TREE)
    {
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }
  *expr_p = temp;
  return GS_OK;
}
/* Gimplify an OMP_ORDERED construct.  EXPR is the tree version.  BODY
   is the OMP_BODY of the original EXPR (which has already been
   gimplified so it's not present in the EXPR).
   Return the gimplified GIMPLE_OMP_ORDERED tuple.  */
static gimple *
gimplify_omp_ordered (tree expr, gimple_seq body)
{
  tree c, decls;
  int failures = 0;
  unsigned int i;
  tree source_c = NULL_TREE;
  tree sink_c = NULL_TREE;
  if (gimplify_omp_ctxp)
    {
      /* Validate depend(sink:)/depend(source) clauses against the
	 enclosing doacross loop's iteration variables.  */
      for (c = OMP_ORDERED_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	    && gimplify_omp_ctxp->loop_iter_var.is_empty ()
	    && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK
		|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE))
	  {
	    /* No recorded iteration variables means there is no enclosing
	       ordered(n) loop.  */
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%<ordered%> construct with %<depend%> clause must be "
		      "closely nested inside a loop with %<ordered%> clause "
		      "with a parameter");
	    failures++;
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		 && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
	  {
	    bool fail = false;
	    /* loop_iter_var stores pairs: [2*i] is the user-visible
	       iteration variable of loop I, [2*i+1] its internal
	       replacement.  Check each sink element against them.  */
	    for (decls = OMP_CLAUSE_DECL (c), i = 0;
		 decls && TREE_CODE (decls) == TREE_LIST;
		 decls = TREE_CHAIN (decls), ++i)
	      if (i >= gimplify_omp_ctxp->loop_iter_var.length () / 2)
		continue;
	      else if (TREE_VALUE (decls)
		       != gimplify_omp_ctxp->loop_iter_var[2 * i])
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "variable %qE is not an iteration "
			    "of outermost loop %d, expected %qE",
			    TREE_VALUE (decls), i + 1,
			    gimplify_omp_ctxp->loop_iter_var[2 * i]);
		  fail = true;
		  failures++;
		}
	      else
		/* Substitute the internal iteration variable.  */
		TREE_VALUE (decls)
		  = gimplify_omp_ctxp->loop_iter_var[2 * i + 1];
	    if (!fail && i != gimplify_omp_ctxp->loop_iter_var.length () / 2)
	      {
		error_at (OMP_CLAUSE_LOCATION (c),
			  "number of variables in %<depend%> clause with "
			  "%<sink%> modifier does not match number of "
			  "iteration variables");
		failures++;
	      }
	    sink_c = c;
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		 && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
	  {
	    /* At most one depend(source) clause is allowed.  */
	    if (source_c)
	      {
		error_at (OMP_CLAUSE_LOCATION (c),
			  "more than one %<depend%> clause with %<source%> "
			  "modifier on an %<ordered%> construct");
		failures++;
	      }
	    else
	      source_c = c;
	  }
    }
  /* source and sink modifiers are mutually exclusive on one construct.  */
  if (source_c && sink_c)
    {
      error_at (OMP_CLAUSE_LOCATION (source_c),
		"%<depend%> clause with %<source%> modifier specified "
		"together with %<depend%> clauses with %<sink%> modifier "
		"on the same construct");
      failures++;
    }
  /* On any diagnosed failure, drop the construct entirely.  */
  if (failures)
    return gimple_build_nop ();
  return gimple_build_omp_ordered (body, OMP_ORDERED_CLAUSES (expr));
}
/* Convert the GENERIC expression tree *EXPR_P to GIMPLE. If the
expression produces a value to be used as an operand inside a GIMPLE
statement, the value will be stored back in *EXPR_P. This value will
be a tree of class tcc_declaration, tcc_constant, tcc_reference or
an SSA_NAME. The corresponding sequence of GIMPLE statements is
emitted in PRE_P and POST_P.
Additionally, this process may overwrite parts of the input
expression during gimplification. Ideally, it should be
possible to do non-destructive gimplification.
EXPR_P points to the GENERIC expression to convert to GIMPLE. If
the expression needs to evaluate to a value to be used as
an operand in a GIMPLE statement, this value will be stored in
*EXPR_P on exit. This happens when the caller specifies one
of fb_lvalue or fb_rvalue fallback flags.
PRE_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of EXPR and all the side-effects that must
be executed before the main expression. On exit, the last
statement of PRE_P is the core statement being gimplified. For
instance, when gimplifying 'if (++a)' the last statement in
PRE_P will be 'if (t.1)' where t.1 is the result of
pre-incrementing 'a'.
POST_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of all the side-effects that must be executed
after the main expression. If this is NULL, the post
side-effects are stored at the end of PRE_P.
The reason why the output is split in two is to handle post
side-effects explicitly. In some cases, an expression may have
inner and outer post side-effects which need to be emitted in
an order different from the one given by the recursive
traversal. For instance, for the expression (*p--)++ the post
side-effects of '--' must actually occur *after* the post
side-effects of '++'. However, gimplification will first visit
the inner expression, so if a separate POST sequence was not
used, the resulting sequence would be:
1 t.1 = *p
2 p = p - 1
3 t.2 = t.1 + 1
4 *p = t.2
However, the post-decrement operation in line #2 must not be
evaluated until after the store to *p at line #4, so the
correct sequence should be:
1 t.1 = *p
2 t.2 = t.1 + 1
3 *p = t.2
4 p = p - 1
So, by specifying a separate post queue, it is possible
to emit the post side-effects in the correct order.
If POST_P is NULL, an internal queue will be used. Before
returning to the caller, the sequence POST_P is appended to
the main output sequence PRE_P.
GIMPLE_TEST_F points to a function that takes a tree T and
returns nonzero if T is in the GIMPLE form requested by the
caller. The GIMPLE predicates are in gimple.c.
FALLBACK tells the function what sort of a temporary we want if
gimplification cannot produce an expression that complies with
GIMPLE_TEST_F.
fb_none means that no temporary should be generated
fb_rvalue means that an rvalue is OK to generate
fb_lvalue means that an lvalue is OK to generate
fb_either means that either is OK, but an lvalue is preferable.
fb_mayfail means that gimplification may fail (in which case
GS_ERROR will be returned)
The return value is either GS_ERROR or GS_ALL_DONE, since this
function iterates until EXPR is completely gimplified or an error
occurs. */
enum gimplify_status
gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool (*gimple_test_f) (tree), fallback_t fallback)
{
tree tmp;
gimple_seq internal_pre = NULL;
gimple_seq internal_post = NULL;
tree save_expr;
bool is_statement;
location_t saved_location;
enum gimplify_status ret;
gimple_stmt_iterator pre_last_gsi, post_last_gsi;
tree label;
save_expr = *expr_p;
if (save_expr == NULL_TREE)
return GS_ALL_DONE;
/* If we are gimplifying a top-level statement, PRE_P must be valid. */
is_statement = gimple_test_f == is_gimple_stmt;
if (is_statement)
gcc_assert (pre_p);
/* Consistency checks. */
if (gimple_test_f == is_gimple_reg)
gcc_assert (fallback & (fb_rvalue | fb_lvalue));
else if (gimple_test_f == is_gimple_val
|| gimple_test_f == is_gimple_call_addr
|| gimple_test_f == is_gimple_condexpr
|| gimple_test_f == is_gimple_condexpr_for_cond
|| gimple_test_f == is_gimple_mem_rhs
|| gimple_test_f == is_gimple_mem_rhs_or_call
|| gimple_test_f == is_gimple_reg_rhs
|| gimple_test_f == is_gimple_reg_rhs_or_call
|| gimple_test_f == is_gimple_asm_val
|| gimple_test_f == is_gimple_mem_ref_addr)
gcc_assert (fallback & fb_rvalue);
else if (gimple_test_f == is_gimple_min_lval
|| gimple_test_f == is_gimple_lvalue)
gcc_assert (fallback & fb_lvalue);
else if (gimple_test_f == is_gimple_addressable)
gcc_assert (fallback & fb_either);
else if (gimple_test_f == is_gimple_stmt)
gcc_assert (fallback == fb_none);
else
{
/* We should have recognized the GIMPLE_TEST_F predicate to
know what kind of fallback to use in case a temporary is
needed to hold the value or address of *EXPR_P. */
gcc_unreachable ();
}
/* We used to check the predicate here and return immediately if it
succeeds. This is wrong; the design is for gimplification to be
idempotent, and for the predicates to only test for valid forms, not
whether they are fully simplified. */
if (pre_p == NULL)
pre_p = &internal_pre;
if (post_p == NULL)
post_p = &internal_post;
/* Remember the last statements added to PRE_P and POST_P. Every
new statement added by the gimplification helpers needs to be
annotated with location information. To centralize the
responsibility, we remember the last statement that had been
added to both queues before gimplifying *EXPR_P. If
gimplification produces new statements in PRE_P and POST_P, those
statements will be annotated with the same location information
as *EXPR_P. */
pre_last_gsi = gsi_last (*pre_p);
post_last_gsi = gsi_last (*post_p);
saved_location = input_location;
if (save_expr != error_mark_node
&& EXPR_HAS_LOCATION (*expr_p))
input_location = EXPR_LOCATION (*expr_p);
/* Loop over the specific gimplifiers until the toplevel node
remains the same. */
do
{
/* Strip away as many useless type conversions as possible
at the toplevel. */
STRIP_USELESS_TYPE_CONVERSION (*expr_p);
/* Remember the expr. */
save_expr = *expr_p;
/* Die, die, die, my darling. */
if (error_operand_p (save_expr))
{
ret = GS_ERROR;
break;
}
/* Do any language-specific gimplification. */
ret = ((enum gimplify_status)
lang_hooks.gimplify_expr (expr_p, pre_p, post_p));
if (ret == GS_OK)
{
if (*expr_p == NULL_TREE)
break;
if (*expr_p != save_expr)
continue;
}
else if (ret != GS_UNHANDLED)
break;
/* Make sure that all the cases set 'ret' appropriately. */
ret = GS_UNHANDLED;
switch (TREE_CODE (*expr_p))
{
/* First deal with the special cases. */
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
ret = gimplify_self_mod_expr (expr_p, pre_p, post_p,
fallback != fb_none,
TREE_TYPE (*expr_p));
break;
case VIEW_CONVERT_EXPR:
if ((fallback & fb_rvalue)
&& is_gimple_reg_type (TREE_TYPE (*expr_p))
&& is_gimple_reg_type (TREE_TYPE (TREE_OPERAND (*expr_p, 0))))
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
}
/* Fallthru. */
case ARRAY_REF:
case ARRAY_RANGE_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case COMPONENT_REF:
ret = gimplify_compound_lval (expr_p, pre_p, post_p,
fallback ? fallback : fb_rvalue);
break;
case COND_EXPR:
ret = gimplify_cond_expr (expr_p, pre_p, fallback);
/* C99 code may assign to an array in a structure value of a
conditional expression, and this has undefined behavior
only on execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p, false);
mark_addressable (*expr_p);
ret = GS_OK;
}
break;
case CALL_EXPR:
ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none);
/* C99 code may assign to an array in a structure returned
from a function, and this has undefined behavior only on
execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p, false);
mark_addressable (*expr_p);
ret = GS_OK;
}
break;
case TREE_LIST:
gcc_unreachable ();
case COMPOUND_EXPR:
ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none);
break;
case COMPOUND_LITERAL_EXPR:
ret = gimplify_compound_literal_expr (expr_p, pre_p,
gimple_test_f, fallback);
break;
case MODIFY_EXPR:
case INIT_EXPR:
ret = gimplify_modify_expr (expr_p, pre_p, post_p,
fallback != fb_none);
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
{
/* Preserve the original type of the expression and the
source location of the outer expression. */
tree org_type = TREE_TYPE (*expr_p);
*expr_p = gimple_boolify (*expr_p);
*expr_p = build3_loc (input_location, COND_EXPR,
org_type, *expr_p,
fold_convert_loc
(input_location,
org_type, boolean_true_node),
fold_convert_loc
(input_location,
org_type, boolean_false_node));
ret = GS_OK;
break;
}
case TRUTH_NOT_EXPR:
{
tree type = TREE_TYPE (*expr_p);
/* The parsers are careful to generate TRUTH_NOT_EXPR
only with operands that are always zero or one.
We do not fold here but handle the only interesting case
manually, as fold may re-introduce the TRUTH_NOT_EXPR. */
*expr_p = gimple_boolify (*expr_p);
if (TYPE_PRECISION (TREE_TYPE (*expr_p)) == 1)
*expr_p = build1_loc (input_location, BIT_NOT_EXPR,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0));
else
*expr_p = build2_loc (input_location, BIT_XOR_EXPR,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
build_int_cst (TREE_TYPE (*expr_p), 1));
if (!useless_type_conversion_p (type, TREE_TYPE (*expr_p)))
*expr_p = fold_convert_loc (input_location, type, *expr_p);
ret = GS_OK;
break;
}
case ADDR_EXPR:
ret = gimplify_addr_expr (expr_p, pre_p, post_p);
break;
case ANNOTATE_EXPR:
{
tree cond = TREE_OPERAND (*expr_p, 0);
tree kind = TREE_OPERAND (*expr_p, 1);
tree data = TREE_OPERAND (*expr_p, 2);
tree type = TREE_TYPE (cond);
if (!INTEGRAL_TYPE_P (type))
{
*expr_p = cond;
ret = GS_OK;
break;
}
tree tmp = create_tmp_var (type);
gimplify_arg (&cond, pre_p, EXPR_LOCATION (*expr_p));
gcall *call
= gimple_build_call_internal (IFN_ANNOTATE, 3, cond, kind, data);
gimple_call_set_lhs (call, tmp);
gimplify_seq_add_stmt (pre_p, call);
*expr_p = tmp;
ret = GS_ALL_DONE;
break;
}
case VA_ARG_EXPR:
ret = gimplify_va_arg_expr (expr_p, pre_p, post_p);
break;
CASE_CONVERT:
if (IS_EMPTY_STMT (*expr_p))
{
ret = GS_ALL_DONE;
break;
}
if (VOID_TYPE_P (TREE_TYPE (*expr_p))
|| fallback == fb_none)
{
/* Just strip a conversion to void (or in void context) and
try again. */
*expr_p = TREE_OPERAND (*expr_p, 0);
ret = GS_OK;
break;
}
ret = gimplify_conversion (expr_p);
if (ret == GS_ERROR)
break;
if (*expr_p != save_expr)
break;
/* FALLTHRU */
case FIX_TRUNC_EXPR:
/* unary_expr: ... | '(' cast ')' val | ... */
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
case INDIRECT_REF:
{
bool volatilep = TREE_THIS_VOLATILE (*expr_p);
bool notrap = TREE_THIS_NOTRAP (*expr_p);
tree saved_ptr_type = TREE_TYPE (TREE_OPERAND (*expr_p, 0));
*expr_p = fold_indirect_ref_loc (input_location, *expr_p);
if (*expr_p != save_expr)
{
ret = GS_OK;
break;
}
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_reg, fb_rvalue);
if (ret == GS_ERROR)
break;
recalculate_side_effects (*expr_p);
*expr_p = fold_build2_loc (input_location, MEM_REF,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
build_int_cst (saved_ptr_type, 0));
TREE_THIS_VOLATILE (*expr_p) = volatilep;
TREE_THIS_NOTRAP (*expr_p) = notrap;
ret = GS_OK;
break;
}
/* We arrive here through the various re-gimplifcation paths. */
case MEM_REF:
/* First try re-folding the whole thing. */
tmp = fold_binary (MEM_REF, TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
TREE_OPERAND (*expr_p, 1));
if (tmp)
{
REF_REVERSE_STORAGE_ORDER (tmp)
= REF_REVERSE_STORAGE_ORDER (*expr_p);
*expr_p = tmp;
recalculate_side_effects (*expr_p);
ret = GS_OK;
break;
}
/* Avoid re-gimplifying the address operand if it is already
in suitable form. Re-gimplifying would mark the address
operand addressable. Always gimplify when not in SSA form
as we still may have to gimplify decls with value-exprs. */
if (!gimplify_ctxp || !gimple_in_ssa_p (cfun)
|| !is_gimple_mem_ref_addr (TREE_OPERAND (*expr_p, 0)))
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_mem_ref_addr, fb_rvalue);
if (ret == GS_ERROR)
break;
}
recalculate_side_effects (*expr_p);
ret = GS_ALL_DONE;
break;
/* Constants need not be gimplified. */
case INTEGER_CST:
case REAL_CST:
case FIXED_CST:
case STRING_CST:
case COMPLEX_CST:
case VECTOR_CST:
/* Drop the overflow flag on constants, we do not want
that in the GIMPLE IL. */
if (TREE_OVERFLOW_P (*expr_p))
*expr_p = drop_tree_overflow (*expr_p);
ret = GS_ALL_DONE;
break;
case CONST_DECL:
/* If we require an lvalue, such as for ADDR_EXPR, retain the
CONST_DECL node. Otherwise the decl is replaceable by its
value. */
/* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */
if (fallback & fb_lvalue)
ret = GS_ALL_DONE;
else
{
*expr_p = DECL_INITIAL (*expr_p);
ret = GS_OK;
}
break;
case DECL_EXPR:
ret = gimplify_decl_expr (expr_p, pre_p);
break;
case BIND_EXPR:
ret = gimplify_bind_expr (expr_p, pre_p);
break;
case LOOP_EXPR:
ret = gimplify_loop_expr (expr_p, pre_p);
break;
case SWITCH_EXPR:
ret = gimplify_switch_expr (expr_p, pre_p);
break;
case EXIT_EXPR:
ret = gimplify_exit_expr (expr_p);
break;
case GOTO_EXPR:
/* If the target is not LABEL, then it is a computed jump
and the target needs to be gimplified. */
if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL)
{
ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p,
NULL, is_gimple_val, fb_rvalue);
if (ret == GS_ERROR)
break;
}
gimplify_seq_add_stmt (pre_p,
gimple_build_goto (GOTO_DESTINATION (*expr_p)));
ret = GS_ALL_DONE;
break;
case PREDICT_EXPR:
gimplify_seq_add_stmt (pre_p,
gimple_build_predict (PREDICT_EXPR_PREDICTOR (*expr_p),
PREDICT_EXPR_OUTCOME (*expr_p)));
ret = GS_ALL_DONE;
break;
case LABEL_EXPR:
ret = gimplify_label_expr (expr_p, pre_p);
label = LABEL_EXPR_LABEL (*expr_p);
gcc_assert (decl_function_context (label) == current_function_decl);
/* If the label is used in a goto statement, or address of the label
is taken, we need to unpoison all variables that were seen so far.
Doing so would prevent us from reporting a false positives. */
if (asan_poisoned_variables
&& asan_used_labels != NULL
&& asan_used_labels->contains (label))
asan_poison_variables (asan_poisoned_variables, false, pre_p);
break;
case CASE_LABEL_EXPR:
ret = gimplify_case_label_expr (expr_p, pre_p);
if (gimplify_ctxp->live_switch_vars)
asan_poison_variables (gimplify_ctxp->live_switch_vars, false,
pre_p);
break;
case RETURN_EXPR:
ret = gimplify_return_expr (*expr_p, pre_p);
break;
case CONSTRUCTOR:
/* Don't reduce this in place; let gimplify_init_constructor work its
magic. Buf if we're just elaborating this for side effects, just
gimplify any element that has side-effects. */
if (fallback == fb_none)
{
unsigned HOST_WIDE_INT ix;
tree val;
tree temp = NULL_TREE;
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (*expr_p), ix, val)
if (TREE_SIDE_EFFECTS (val))
append_to_statement_list (val, &temp);
*expr_p = temp;
ret = temp ? GS_OK : GS_ALL_DONE;
}
/* C99 code may assign to an array in a constructed
structure or union, and this has undefined behavior only
on execution, so create a temporary if an lvalue is
required. */
else if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p, false);
mark_addressable (*expr_p);
ret = GS_OK;
}
else
ret = GS_ALL_DONE;
break;
/* The following are special cases that are not handled by the
original GIMPLE grammar. */
/* SAVE_EXPR nodes are converted into a GIMPLE identifier and
eliminated. */
case SAVE_EXPR:
ret = gimplify_save_expr (expr_p, pre_p, post_p);
break;
case BIT_FIELD_REF:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_lvalue, fb_either);
recalculate_side_effects (*expr_p);
break;
case TARGET_MEM_REF:
{
enum gimplify_status r0 = GS_ALL_DONE, r1 = GS_ALL_DONE;
if (TMR_BASE (*expr_p))
r0 = gimplify_expr (&TMR_BASE (*expr_p), pre_p,
post_p, is_gimple_mem_ref_addr, fb_either);
if (TMR_INDEX (*expr_p))
r1 = gimplify_expr (&TMR_INDEX (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
if (TMR_INDEX2 (*expr_p))
r1 = gimplify_expr (&TMR_INDEX2 (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
/* TMR_STEP and TMR_OFFSET are always integer constants. */
ret = MIN (r0, r1);
}
break;
case NON_LVALUE_EXPR:
/* This should have been stripped above. */
gcc_unreachable ();
case ASM_EXPR:
ret = gimplify_asm_expr (expr_p, pre_p, post_p);
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
{
gimple_seq eval, cleanup;
gtry *try_;
/* Calls to destructors are generated automatically in FINALLY/CATCH
block. They should have location as UNKNOWN_LOCATION. However,
gimplify_call_expr will reset these call stmts to input_location
if it finds stmt's location is unknown. To prevent resetting for
destructors, we set the input_location to unknown.
Note that this only affects the destructor calls in FINALLY/CATCH
block, and will automatically reset to its original value by the
end of gimplify_expr. */
input_location = UNKNOWN_LOCATION;
eval = cleanup = NULL;
gimplify_and_add (TREE_OPERAND (*expr_p, 0), &eval);
if (TREE_CODE (*expr_p) == TRY_FINALLY_EXPR
&& TREE_CODE (TREE_OPERAND (*expr_p, 1)) == EH_ELSE_EXPR)
{
gimple_seq n = NULL, e = NULL;
gimplify_and_add (TREE_OPERAND (TREE_OPERAND (*expr_p, 1),
0), &n);
gimplify_and_add (TREE_OPERAND (TREE_OPERAND (*expr_p, 1),
1), &e);
if (!gimple_seq_empty_p (n) && !gimple_seq_empty_p (e))
{
geh_else *stmt = gimple_build_eh_else (n, e);
gimple_seq_add_stmt (&cleanup, stmt);
}
}
else
gimplify_and_add (TREE_OPERAND (*expr_p, 1), &cleanup);
/* Don't create bogus GIMPLE_TRY with empty cleanup. */
if (gimple_seq_empty_p (cleanup))
{
gimple_seq_add_seq (pre_p, eval);
ret = GS_ALL_DONE;
break;
}
try_ = gimple_build_try (eval, cleanup,
TREE_CODE (*expr_p) == TRY_FINALLY_EXPR
? GIMPLE_TRY_FINALLY
: GIMPLE_TRY_CATCH);
if (EXPR_HAS_LOCATION (save_expr))
gimple_set_location (try_, EXPR_LOCATION (save_expr));
else if (LOCATION_LOCUS (saved_location) != UNKNOWN_LOCATION)
gimple_set_location (try_, saved_location);
if (TREE_CODE (*expr_p) == TRY_CATCH_EXPR)
gimple_try_set_catch_is_cleanup (try_,
TRY_CATCH_IS_CLEANUP (*expr_p));
gimplify_seq_add_stmt (pre_p, try_);
ret = GS_ALL_DONE;
break;
}
case CLEANUP_POINT_EXPR:
ret = gimplify_cleanup_point_expr (expr_p, pre_p);
break;
case TARGET_EXPR:
ret = gimplify_target_expr (expr_p, pre_p, post_p);
break;
case CATCH_EXPR:
{
gimple *c;
gimple_seq handler = NULL;
gimplify_and_add (CATCH_BODY (*expr_p), &handler);
c = gimple_build_catch (CATCH_TYPES (*expr_p), handler);
gimplify_seq_add_stmt (pre_p, c);
ret = GS_ALL_DONE;
break;
}
case EH_FILTER_EXPR:
{
gimple *ehf;
gimple_seq failure = NULL;
gimplify_and_add (EH_FILTER_FAILURE (*expr_p), &failure);
ehf = gimple_build_eh_filter (EH_FILTER_TYPES (*expr_p), failure);
gimple_set_no_warning (ehf, TREE_NO_WARNING (*expr_p));
gimplify_seq_add_stmt (pre_p, ehf);
ret = GS_ALL_DONE;
break;
}
case OBJ_TYPE_REF:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
TREE_SIDE_EFFECTS (*expr_p) = 0;
ret = MIN (r0, r1);
}
break;
case LABEL_DECL:
/* We get here when taking the address of a label. We mark
the label as "forced"; meaning it can never be removed and
it is a potential target for any computed goto. */
FORCED_LABEL (*expr_p) = 1;
ret = GS_ALL_DONE;
break;
case STATEMENT_LIST:
ret = gimplify_statement_list (expr_p, pre_p);
break;
case WITH_SIZE_EXPR:
{
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p == &internal_post ? NULL : post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
is_gimple_val, fb_rvalue);
ret = GS_ALL_DONE;
}
break;
case VAR_DECL:
case PARM_DECL:
ret = gimplify_var_or_parm_decl (expr_p);
break;
case RESULT_DECL:
/* When within an OMP context, notice uses of variables. */
if (gimplify_omp_ctxp)
omp_notice_variable (gimplify_omp_ctxp, *expr_p, true);
ret = GS_ALL_DONE;
break;
case DEBUG_EXPR_DECL:
gcc_unreachable ();
case DEBUG_BEGIN_STMT:
gimplify_seq_add_stmt (pre_p,
gimple_build_debug_begin_stmt
(TREE_BLOCK (*expr_p),
EXPR_LOCATION (*expr_p)));
ret = GS_ALL_DONE;
*expr_p = NULL;
break;
case SSA_NAME:
/* Allow callbacks into the gimplifier during optimization. */
ret = GS_ALL_DONE;
break;
case OMP_PARALLEL:
gimplify_omp_parallel (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_TASK:
gimplify_omp_task (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_FOR:
case OMP_SIMD:
case OMP_DISTRIBUTE:
case OMP_TASKLOOP:
case OACC_LOOP:
ret = gimplify_omp_for (expr_p, pre_p);
break;
case OMP_LOOP:
ret = gimplify_omp_loop (expr_p, pre_p);
break;
case OACC_CACHE:
gimplify_oacc_cache (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_DECLARE:
gimplify_oacc_declare (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_HOST_DATA:
case OACC_DATA:
case OACC_KERNELS:
case OACC_PARALLEL:
case OACC_SERIAL:
case OMP_SECTIONS:
case OMP_SINGLE:
case OMP_TARGET:
case OMP_TARGET_DATA:
case OMP_TEAMS:
gimplify_omp_workshare (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_ENTER_DATA:
case OACC_EXIT_DATA:
case OACC_UPDATE:
case OMP_TARGET_UPDATE:
case OMP_TARGET_ENTER_DATA:
case OMP_TARGET_EXIT_DATA:
gimplify_omp_target_update (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_SECTION:
case OMP_MASTER:
case OMP_ORDERED:
case OMP_CRITICAL:
case OMP_SCAN:
{
gimple_seq body = NULL;
gimple *g;
bool saved_in_omp_construct = in_omp_construct;
in_omp_construct = true;
gimplify_and_add (OMP_BODY (*expr_p), &body);
in_omp_construct = saved_in_omp_construct;
switch (TREE_CODE (*expr_p))
{
case OMP_SECTION:
g = gimple_build_omp_section (body);
break;
case OMP_MASTER:
g = gimple_build_omp_master (body);
break;
case OMP_ORDERED:
g = gimplify_omp_ordered (*expr_p, body);
break;
case OMP_CRITICAL:
gimplify_scan_omp_clauses (&OMP_CRITICAL_CLAUSES (*expr_p),
pre_p, ORT_WORKSHARE, OMP_CRITICAL);
gimplify_adjust_omp_clauses (pre_p, body,
&OMP_CRITICAL_CLAUSES (*expr_p),
OMP_CRITICAL);
g = gimple_build_omp_critical (body,
OMP_CRITICAL_NAME (*expr_p),
OMP_CRITICAL_CLAUSES (*expr_p));
break;
case OMP_SCAN:
gimplify_scan_omp_clauses (&OMP_SCAN_CLAUSES (*expr_p),
pre_p, ORT_WORKSHARE, OMP_SCAN);
gimplify_adjust_omp_clauses (pre_p, body,
&OMP_SCAN_CLAUSES (*expr_p),
OMP_SCAN);
g = gimple_build_omp_scan (body, OMP_SCAN_CLAUSES (*expr_p));
break;
default:
gcc_unreachable ();
}
gimplify_seq_add_stmt (pre_p, g);
ret = GS_ALL_DONE;
break;
}
case OMP_TASKGROUP:
{
gimple_seq body = NULL;
tree *pclauses = &OMP_TASKGROUP_CLAUSES (*expr_p);
bool saved_in_omp_construct = in_omp_construct;
gimplify_scan_omp_clauses (pclauses, pre_p, ORT_TASKGROUP,
OMP_TASKGROUP);
gimplify_adjust_omp_clauses (pre_p, NULL, pclauses, OMP_TASKGROUP);
in_omp_construct = true;
gimplify_and_add (OMP_BODY (*expr_p), &body);
in_omp_construct = saved_in_omp_construct;
gimple_seq cleanup = NULL;
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_END);
gimple *g = gimple_build_call (fn, 0);
gimple_seq_add_stmt (&cleanup, g);
g = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
body = NULL;
gimple_seq_add_stmt (&body, g);
g = gimple_build_omp_taskgroup (body, *pclauses);
gimplify_seq_add_stmt (pre_p, g);
ret = GS_ALL_DONE;
break;
}
case OMP_ATOMIC:
case OMP_ATOMIC_READ:
case OMP_ATOMIC_CAPTURE_OLD:
case OMP_ATOMIC_CAPTURE_NEW:
ret = gimplify_omp_atomic (expr_p, pre_p);
break;
case TRANSACTION_EXPR:
ret = gimplify_transaction (expr_p, pre_p);
break;
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
{
tree orig_type = TREE_TYPE (*expr_p);
tree new_type, xop0, xop1;
*expr_p = gimple_boolify (*expr_p);
new_type = TREE_TYPE (*expr_p);
if (!useless_type_conversion_p (orig_type, new_type))
{
*expr_p = fold_convert_loc (input_location, orig_type, *expr_p);
ret = GS_OK;
break;
}
/* Boolified binary truth expressions are semantically equivalent
to bitwise binary expressions. Canonicalize them to the
bitwise variant. */
switch (TREE_CODE (*expr_p))
{
case TRUTH_AND_EXPR:
TREE_SET_CODE (*expr_p, BIT_AND_EXPR);
break;
case TRUTH_OR_EXPR:
TREE_SET_CODE (*expr_p, BIT_IOR_EXPR);
break;
case TRUTH_XOR_EXPR:
TREE_SET_CODE (*expr_p, BIT_XOR_EXPR);
break;
default:
break;
}
/* Now make sure that operands have compatible type to
expression's new_type. */
xop0 = TREE_OPERAND (*expr_p, 0);
xop1 = TREE_OPERAND (*expr_p, 1);
if (!useless_type_conversion_p (new_type, TREE_TYPE (xop0)))
TREE_OPERAND (*expr_p, 0) = fold_convert_loc (input_location,
new_type,
xop0);
if (!useless_type_conversion_p (new_type, TREE_TYPE (xop1)))
TREE_OPERAND (*expr_p, 1) = fold_convert_loc (input_location,
new_type,
xop1);
/* Continue classified as tcc_binary. */
goto expr_2;
}
case VEC_COND_EXPR:
{
enum gimplify_status r0, r1, r2;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_condexpr, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (MIN (r0, r1), r2);
recalculate_side_effects (*expr_p);
}
break;
case VEC_PERM_EXPR:
/* Classified as tcc_expression. */
goto expr_3;
case BIT_INSERT_EXPR:
/* Argument 3 is a constant. */
goto expr_2;
case POINTER_PLUS_EXPR:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
ret = MIN (r0, r1);
break;
}
default:
switch (TREE_CODE_CLASS (TREE_CODE (*expr_p)))
{
case tcc_comparison:
/* Handle comparison of objects of non scalar mode aggregates
with a call to memcmp. It would be nice to only have to do
this for variable-sized objects, but then we'd have to allow
the same nest of reference nodes we allow for MODIFY_EXPR and
that's too complex.
Compare scalar mode aggregates as scalar mode values. Using
memcmp for them would be very inefficient at best, and is
plain wrong if bitfields are involved. */
{
tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1));
/* Vector comparisons need no boolification. */
if (TREE_CODE (type) == VECTOR_TYPE)
goto expr_2;
else if (!AGGREGATE_TYPE_P (type))
{
tree org_type = TREE_TYPE (*expr_p);
*expr_p = gimple_boolify (*expr_p);
if (!useless_type_conversion_p (org_type,
TREE_TYPE (*expr_p)))
{
*expr_p = fold_convert_loc (input_location,
org_type, *expr_p);
ret = GS_OK;
}
else
goto expr_2;
}
else if (TYPE_MODE (type) != BLKmode)
ret = gimplify_scalar_mode_aggregate_compare (expr_p);
else
ret = gimplify_variable_sized_compare (expr_p);
break;
}
/* If *EXPR_P does not need to be special-cased, handle it
according to its class. */
case tcc_unary:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
break;
case tcc_binary:
expr_2:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (r0, r1);
break;
}
expr_3:
{
enum gimplify_status r0, r1, r2;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (MIN (r0, r1), r2);
break;
}
case tcc_declaration:
case tcc_constant:
ret = GS_ALL_DONE;
goto dont_recalculate;
default:
gcc_unreachable ();
}
recalculate_side_effects (*expr_p);
dont_recalculate:
break;
}
gcc_assert (*expr_p || ret != GS_OK);
}
while (ret == GS_OK);
/* If we encountered an error_mark somewhere nested inside, either
stub out the statement or propagate the error back out. */
if (ret == GS_ERROR)
{
if (is_statement)
*expr_p = NULL;
goto out;
}
/* This was only valid as a return value from the langhook, which
we handled. Make sure it doesn't escape from any other context. */
gcc_assert (ret != GS_UNHANDLED);
if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p))
{
/* We aren't looking for a value, and we don't have a valid
statement. If it doesn't have side-effects, throw it away.
We can also get here with code such as "*&&L;", where L is
a LABEL_DECL that is marked as FORCED_LABEL. */
if (TREE_CODE (*expr_p) == LABEL_DECL
|| !TREE_SIDE_EFFECTS (*expr_p))
*expr_p = NULL;
else if (!TREE_THIS_VOLATILE (*expr_p))
{
/* This is probably a _REF that contains something nested that
has side effects. Recurse through the operands to find it. */
enum tree_code code = TREE_CODE (*expr_p);
switch (code)
{
case COMPONENT_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case VIEW_CONVERT_EXPR:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
gimple_test_f, fallback);
break;
default:
/* Anything else with side-effects must be converted to
a valid statement before we get here. */
gcc_unreachable ();
}
*expr_p = NULL;
}
else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p))
&& TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode)
{
/* Historically, the compiler has treated a bare reference
to a non-BLKmode volatile lvalue as forcing a load. */
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p));
/* Normally, we do not want to create a temporary for a
TREE_ADDRESSABLE type because such a type should not be
copied by bitwise-assignment. However, we make an
exception here, as all we are doing here is ensuring that
we read the bytes that make up the type. We use
create_tmp_var_raw because create_tmp_var will abort when
given a TREE_ADDRESSABLE type. */
tree tmp = create_tmp_var_raw (type, "vol");
gimple_add_tmp_var (tmp);
gimplify_assign (tmp, *expr_p, pre_p);
*expr_p = NULL;
}
else
/* We can't do anything useful with a volatile reference to
an incomplete type, so just throw it away. Likewise for
a BLKmode type, since any implicit inner load should
already have been turned into an explicit one by the
gimplification process. */
*expr_p = NULL;
}
/* If we are gimplifying at the statement level, we're done. Tack
everything together and return. */
if (fallback == fb_none || is_statement)
{
/* Since *EXPR_P has been converted into a GIMPLE tuple, clear
it out for GC to reclaim it. */
*expr_p = NULL_TREE;
if (!gimple_seq_empty_p (internal_pre)
|| !gimple_seq_empty_p (internal_post))
{
gimplify_seq_add_seq (&internal_pre, internal_post);
gimplify_seq_add_seq (pre_p, internal_pre);
}
/* The result of gimplifying *EXPR_P is going to be the last few
statements in *PRE_P and *POST_P. Add location information
to all the statements that were added by the gimplification
helpers. */
if (!gimple_seq_empty_p (*pre_p))
annotate_all_with_location_after (*pre_p, pre_last_gsi, input_location);
if (!gimple_seq_empty_p (*post_p))
annotate_all_with_location_after (*post_p, post_last_gsi,
input_location);
goto out;
}
#ifdef ENABLE_GIMPLE_CHECKING
if (*expr_p)
{
enum tree_code code = TREE_CODE (*expr_p);
/* These expressions should already be in gimple IR form. */
gcc_assert (code != MODIFY_EXPR
&& code != ASM_EXPR
&& code != BIND_EXPR
&& code != CATCH_EXPR
&& (code != COND_EXPR || gimplify_ctxp->allow_rhs_cond_expr)
&& code != EH_FILTER_EXPR
&& code != GOTO_EXPR
&& code != LABEL_EXPR
&& code != LOOP_EXPR
&& code != SWITCH_EXPR
&& code != TRY_FINALLY_EXPR
&& code != EH_ELSE_EXPR
&& code != OACC_PARALLEL
&& code != OACC_KERNELS
&& code != OACC_SERIAL
&& code != OACC_DATA
&& code != OACC_HOST_DATA
&& code != OACC_DECLARE
&& code != OACC_UPDATE
&& code != OACC_ENTER_DATA
&& code != OACC_EXIT_DATA
&& code != OACC_CACHE
&& code != OMP_CRITICAL
&& code != OMP_FOR
&& code != OACC_LOOP
&& code != OMP_MASTER
&& code != OMP_TASKGROUP
&& code != OMP_ORDERED
&& code != OMP_PARALLEL
&& code != OMP_SCAN
&& code != OMP_SECTIONS
&& code != OMP_SECTION
&& code != OMP_SINGLE);
}
#endif
/* Otherwise we're gimplifying a subexpression, so the resulting
value is interesting. If it's a valid operand that matches
GIMPLE_TEST_F, we're done. Unless we are handling some
post-effects internally; if that's the case, we need to copy into
a temporary before adding the post-effects to POST_P. */
if (gimple_seq_empty_p (internal_post) && (*gimple_test_f) (*expr_p))
goto out;
/* Otherwise, we need to create a new temporary for the gimplified
expression. */
/* We can't return an lvalue if we have an internal postqueue. The
object the lvalue refers to would (probably) be modified by the
postqueue; we need to copy the value out first, which means an
rvalue. */
if ((fallback & fb_lvalue)
&& gimple_seq_empty_p (internal_post)
&& is_gimple_addressable (*expr_p))
{
/* An lvalue will do. Take the address of the expression, store it
in a temporary, and replace the expression with an INDIRECT_REF of
that temporary. */
tree ref_alias_type = reference_alias_ptr_type (*expr_p);
unsigned int ref_align = get_object_alignment (*expr_p);
tree ref_type = TREE_TYPE (*expr_p);
tmp = build_fold_addr_expr_loc (input_location, *expr_p);
gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue);
if (TYPE_ALIGN (ref_type) != ref_align)
ref_type = build_aligned_type (ref_type, ref_align);
*expr_p = build2 (MEM_REF, ref_type,
tmp, build_zero_cst (ref_alias_type));
}
else if ((fallback & fb_rvalue) && is_gimple_reg_rhs_or_call (*expr_p))
{
/* An rvalue will do. Assign the gimplified expression into a
new temporary TMP and replace the original expression with
TMP. First, make sure that the expression has a type so that
it can be assigned into a temporary. */
gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p)));
*expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
else
{
#ifdef ENABLE_GIMPLE_CHECKING
if (!(fallback & fb_mayfail))
{
fprintf (stderr, "gimplification failed:\n");
print_generic_expr (stderr, *expr_p);
debug_tree (*expr_p);
internal_error ("gimplification failed");
}
#endif
gcc_assert (fallback & fb_mayfail);
/* If this is an asm statement, and the user asked for the
impossible, don't die. Fail and let gimplify_asm_expr
issue an error. */
ret = GS_ERROR;
goto out;
}
/* Make sure the temporary matches our predicate. */
gcc_assert ((*gimple_test_f) (*expr_p));
if (!gimple_seq_empty_p (internal_post))
{
annotate_all_with_location (internal_post, input_location);
gimplify_seq_add_seq (pre_p, internal_post);
}
out:
input_location = saved_location;
return ret;
}
/* Like gimplify_expr but make sure the gimplified result is not itself
   a SSA name (but a decl if it were).  Temporaries required by
   evaluating *EXPR_P may be still SSA names.  */

static enum gimplify_status
gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
	       bool (*gimple_test_f) (tree), fallback_t fallback,
	       bool allow_ssa)
{
  /* Remember whether the incoming expression already was an SSA name;
     in that case we must copy its value instead of stealing its def.  */
  bool input_was_ssa = (TREE_CODE (*expr_p) == SSA_NAME);
  enum gimplify_status status
    = gimplify_expr (expr_p, pre_p, post_p, gimple_test_f, fallback);

  if (allow_ssa || TREE_CODE (*expr_p) != SSA_NAME)
    return status;

  tree ssa_val = *expr_p;
  if (input_was_ssa)
    /* Copy the pre-existing SSA name's value into a fresh temporary.  */
    *expr_p = get_initialized_tmp_var (ssa_val, pre_p, NULL, false);
  else
    {
      /* The gimplifier created this SSA name itself: avoid the extra
	 copy by retargeting its defining statement at a new temporary
	 register, then release the name.  */
      *expr_p = create_tmp_reg (TREE_TYPE (ssa_val));
      if (!gimple_nop_p (SSA_NAME_DEF_STMT (ssa_val)))
	gimple_set_lhs (SSA_NAME_DEF_STMT (ssa_val), *expr_p);
      release_ssa_name (ssa_val);
    }

  return status;
}
/* Look through TYPE for variable-sized objects and gimplify each such
   size that we find.  Add to LIST_P any statements generated.  */

void
gimplify_type_sizes (tree type, gimple_seq *list_p)
{
  tree field, t;

  if (type == NULL || type == error_mark_node)
    return;

  /* We first do the main variant, then copy into any other variants.  */
  type = TYPE_MAIN_VARIANT (type);

  /* Avoid infinite recursion.  */
  if (TYPE_SIZES_GIMPLIFIED (type))
    return;

  TYPE_SIZES_GIMPLIFIED (type) = 1;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
      /* Scalar types: the min/max bounds themselves may contain
	 SAVE_EXPRs (e.g. a VLA domain), so gimplify both and then
	 propagate the gimplified bounds to every type variant.  */
      gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p);
      gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p);

      for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
	{
	  TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type);
	  TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type);
	}
      break;

    case ARRAY_TYPE:
      /* These types may not have declarations, so handle them here.  */
      gimplify_type_sizes (TREE_TYPE (type), list_p);
      gimplify_type_sizes (TYPE_DOMAIN (type), list_p);
      /* Ensure VLA bounds aren't removed, for -O0 they should be variables
	 with assigned stack slots, for -O1+ -g they should be tracked
	 by VTA.  */
      if (!(TYPE_NAME (type)
	    && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
	    && DECL_IGNORED_P (TYPE_NAME (type)))
	  && TYPE_DOMAIN (type)
	  && INTEGRAL_TYPE_P (TYPE_DOMAIN (type)))
	{
	  /* Clear DECL_IGNORED_P on the artificial bound variables so
	     the debugger can still see the VLA's extent.  */
	  t = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
	  if (t && VAR_P (t) && DECL_ARTIFICIAL (t))
	    DECL_IGNORED_P (t) = 0;
	  t = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
	  if (t && VAR_P (t) && DECL_ARTIFICIAL (t))
	    DECL_IGNORED_P (t) = 0;
	}
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Aggregates: each field's offset and size may themselves be
	 variable, and the field types may contain further variable-sized
	 objects — handle all of them.  */
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL)
	  {
	    gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p);
	    gimplify_one_sizepos (&DECL_SIZE (field), list_p);
	    gimplify_one_sizepos (&DECL_SIZE_UNIT (field), list_p);
	    gimplify_type_sizes (TREE_TYPE (field), list_p);
	  }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
	/* We used to recurse on the pointed-to type here, which turned out to
	   be incorrect because its definition might refer to variables not
	   yet initialized at this point if a forward declaration is involved.

	   It was actually useful for anonymous pointed-to types to ensure
	   that the sizes evaluation dominates every possible later use of the
	   values.  Restricting to such types here would be safe since there
	   is no possible forward declaration around, but would introduce an
	   undesirable middle-end semantic to anonymity.  We then defer to
	   front-ends the responsibility of ensuring that the sizes are
	   evaluated both early and late enough, e.g. by attaching artificial
	   type declarations to the tree.  */
      break;

    default:
      break;
    }

  /* Gimplify the overall size/size-unit of the type itself, then copy
     the results to all variants, marking them gimplified so the
     recursion guard above skips them.  */
  gimplify_one_sizepos (&TYPE_SIZE (type), list_p);
  gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p);

  for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
    {
      TYPE_SIZE (t) = TYPE_SIZE (type);
      TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type);
      TYPE_SIZES_GIMPLIFIED (t) = 1;
    }
}
/* A subroutine of gimplify_type_sizes to make sure that *EXPR_P,
   a size or position, has had all of its SAVE_EXPRs evaluated.
   We add any required statements to *STMT_P.  */

void
gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p)
{
  tree val = *expr_p;

  /* Nothing to do for a missing or constant value, for a bare VAR_DECL
     (possibly from another function, which the gimplifier must not try
     to replace with a new variable), or for anything containing a
     PLACEHOLDER_EXPR.  */
  if (val == NULL_TREE
      || is_gimple_constant (val)
      || TREE_CODE (val) == VAR_DECL
      || CONTAINS_PLACEHOLDER_P (val))
    return;

  *expr_p = unshare_expr (val);

  /* SSA names in decl/type fields are a bad idea - they'll get reclaimed
     if the def vanishes, so disallow SSA results here.  */
  gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue, false);

  /* If expr wasn't already is_gimple_sizepos or is_gimple_constant from the
     FE, ensure that it is a VAR_DECL, otherwise we might handle some decls
     as gimplify_vla_decl even when they would have all sizes INTEGER_CSTs.  */
  if (is_gimple_constant (*expr_p))
    *expr_p = get_initialized_tmp_var (*expr_p, stmt_p, NULL, false);
}
/* Gimplify the body of statements of FNDECL and return a GIMPLE_BIND node
   containing the sequence of corresponding GIMPLE statements.  If DO_PARMS
   is true, also gimplify the parameters.  */

gbind *
gimplify_body (tree fndecl, bool do_parms)
{
  location_t saved_location = input_location;
  gimple_seq parm_stmts, parm_cleanup = NULL, seq;
  gimple *outer_stmt;
  gbind *outer_bind;

  timevar_push (TV_TREE_GIMPLIFY);

  init_tree_ssa (cfun);

  /* Initialize for optimize_insn_for_s{ize,peed}_p possibly called during
     gimplification.  */
  default_rtl_profile ();

  gcc_assert (gimplify_ctxp == NULL);
  push_gimplify_context (true);

  if (flag_openacc || flag_openmp)
    {
      gcc_assert (gimplify_omp_ctxp == NULL);
      /* "omp declare target" functions get an implicit target context
	 for the duration of the body's gimplification.  */
      if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (fndecl)))
	gimplify_omp_ctxp = new_omp_context (ORT_IMPLICIT_TARGET);
    }

  /* Unshare most shared trees in the body and in that of any nested functions.
     It would seem we don't have to do this for nested functions because
     they are supposed to be output and then the outer function gimplified
     first, but the g++ front end doesn't always do it that way.  */
  unshare_body (fndecl);
  unvisit_body (fndecl);

  /* Make sure input_location isn't set to something weird.  */
  input_location = DECL_SOURCE_LOCATION (fndecl);

  /* Resolve callee-copies.  This has to be done before processing
     the body so that DECL_VALUE_EXPR gets processed correctly.  */
  parm_stmts = do_parms ? gimplify_parameters (&parm_cleanup) : NULL;

  /* Gimplify the function's body.  */
  seq = NULL;
  gimplify_stmt (&DECL_SAVED_TREE (fndecl), &seq);
  outer_stmt = gimple_seq_first_nondebug_stmt (seq);
  if (!outer_stmt)
    {
      /* An empty body still needs one statement; use a no-op.  */
      outer_stmt = gimple_build_nop ();
      gimplify_seq_add_stmt (&seq, outer_stmt);
    }

  /* The body must contain exactly one statement, a GIMPLE_BIND.  If this is
     not the case, wrap everything in a GIMPLE_BIND to make it so.  */
  if (gimple_code (outer_stmt) == GIMPLE_BIND
      && (gimple_seq_first_nondebug_stmt (seq)
	  == gimple_seq_last_nondebug_stmt (seq)))
    {
      outer_bind = as_a <gbind *> (outer_stmt);
      if (gimple_seq_first_stmt (seq) != outer_stmt
	  || gimple_seq_last_stmt (seq) != outer_stmt)
	{
	  /* If there are debug stmts before or after outer_stmt, move them
	     inside of outer_bind body.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (outer_stmt, &seq);
	  gimple_seq second_seq = NULL;
	  if (gimple_seq_first_stmt (seq) != outer_stmt
	      && gimple_seq_last_stmt (seq) != outer_stmt)
	    {
	      /* Debug stmts on both sides of the bind: split the
		 sequence right after the bind and detach the bind.  */
	      second_seq = gsi_split_seq_after (gsi);
	      gsi_remove (&gsi, false);
	    }
	  else if (gimple_seq_first_stmt (seq) != outer_stmt)
	    /* Debug stmts only before the bind.  */
	    gsi_remove (&gsi, false);
	  else
	    {
	      /* Debug stmts only after the bind.  */
	      gsi_remove (&gsi, false);
	      second_seq = seq;
	      seq = NULL;
	    }
	  /* New bind body = leading debug stmts + original bind body
	     + trailing debug stmts.  */
	  gimple_seq_add_seq_without_update (&seq,
					     gimple_bind_body (outer_bind));
	  gimple_seq_add_seq_without_update (&seq, second_seq);
	  gimple_bind_set_body (outer_bind, seq);
	}
    }
  else
    outer_bind = gimple_build_bind (NULL_TREE, seq, NULL);

  DECL_SAVED_TREE (fndecl) = NULL_TREE;

  /* If we had callee-copies statements, insert them at the beginning
     of the function and clear DECL_VALUE_EXPR_P on the parameters.  */
  if (!gimple_seq_empty_p (parm_stmts))
    {
      tree parm;

      gimplify_seq_add_seq (&parm_stmts, gimple_bind_body (outer_bind));
      if (parm_cleanup)
	{
	  /* Wrap the parameter setup plus body in a try/finally so the
	     parameter cleanups run on every exit path.  */
	  gtry *g = gimple_build_try (parm_stmts, parm_cleanup,
				      GIMPLE_TRY_FINALLY);
	  parm_stmts = NULL;
	  gimple_seq_add_stmt (&parm_stmts, g);
	}
      gimple_bind_set_body (outer_bind, parm_stmts);

      for (parm = DECL_ARGUMENTS (current_function_decl);
	   parm; parm = DECL_CHAIN (parm))
	if (DECL_HAS_VALUE_EXPR_P (parm))
	  {
	    DECL_HAS_VALUE_EXPR_P (parm) = 0;
	    DECL_IGNORED_P (parm) = 0;
	  }
    }

  /* Tear down any implicit target context created above.  */
  if ((flag_openacc || flag_openmp || flag_openmp_simd)
      && gimplify_omp_ctxp)
    {
      delete_omp_context (gimplify_omp_ctxp);
      gimplify_omp_ctxp = NULL;
    }

  pop_gimplify_context (outer_bind);
  gcc_assert (gimplify_ctxp == NULL);

  if (flag_checking && !seen_error ())
    verify_gimple_in_seq (gimple_bind_body (outer_bind));

  timevar_pop (TV_TREE_GIMPLIFY);
  input_location = saved_location;

  return outer_bind;
}
typedef char *char_p; /* For DEF_VEC_P. */
/* Return whether we should exclude FNDECL from instrumentation. */
static bool
flag_instrument_functions_exclude_p (tree fndecl)
{
vec<char_p> *v;
v = (vec<char_p> *) flag_instrument_functions_exclude_functions;
if (v && v->length () > 0)
{
const char *name;
int i;
char *s;
name = lang_hooks.decl_printable_name (fndecl, 1);
FOR_EACH_VEC_ELT (*v, i, s)
if (strstr (name, s) != NULL)
return true;
}
v = (vec<char_p> *) flag_instrument_functions_exclude_files;
if (v && v->length () > 0)
{
const char *name;
int i;
char *s;
name = DECL_SOURCE_FILE (fndecl);
FOR_EACH_VEC_ELT (*v, i, s)
if (strstr (name, s) != NULL)
return true;
}
return false;
}
/* Entry point to the gimplification pass.  FNDECL is the FUNCTION_DECL
   node for the function we want to gimplify.

   Return the sequence of GIMPLE statements corresponding to the body
   of FNDECL.  */

void
gimplify_function_tree (tree fndecl)
{
  tree parm, ret;
  gimple_seq seq;
  gbind *bind;

  gcc_assert (!gimple_body (fndecl));

  if (DECL_STRUCT_FUNCTION (fndecl))
    push_cfun (DECL_STRUCT_FUNCTION (fndecl));
  else
    push_struct_function (fndecl);

  /* Tentatively set PROP_gimple_lva here, and reset it in gimplify_va_arg_expr
     if necessary.  */
  cfun->curr_properties |= PROP_gimple_lva;

  for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = DECL_CHAIN (parm))
    {
      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if ((TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE
	   || TREE_CODE (TREE_TYPE (parm)) == VECTOR_TYPE)
	  && !TREE_THIS_VOLATILE (parm)
	  && !needs_to_live_in_memory (parm))
	DECL_GIMPLE_REG_P (parm) = 1;
    }

  /* Likewise for a complex- or vector-typed result.  */
  ret = DECL_RESULT (fndecl);
  if ((TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE
       || TREE_CODE (TREE_TYPE (ret)) == VECTOR_TYPE)
      && !needs_to_live_in_memory (ret))
    DECL_GIMPLE_REG_P (ret) = 1;

  /* Track variables for use-after-scope poisoning while gimplifying
     the body, when ASan requests it.  */
  if (asan_sanitize_use_after_scope () && sanitize_flags_p (SANITIZE_ADDRESS))
    asan_poisoned_variables = new hash_set<tree> ();

  bind = gimplify_body (fndecl, true);

  if (asan_poisoned_variables)
    {
      delete asan_poisoned_variables;
      asan_poisoned_variables = NULL;
    }

  /* The tree body of the function is no longer needed, replace it
     with the new GIMPLE body.  */
  seq = NULL;
  gimple_seq_add_stmt (&seq, bind);
  gimple_set_body (fndecl, seq);

  /* If we're instrumenting function entry/exit, then prepend the call to
     the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to
     catch the exit hook.  */
  /* ??? Add some way to ignore exceptions for this TFE.  */
  if (flag_instrument_function_entry_exit
      && !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl)
      /* Do not instrument extern inline functions.  */
      && !(DECL_DECLARED_INLINE_P (fndecl)
	   && DECL_EXTERNAL (fndecl)
	   && DECL_DISREGARD_INLINE_LIMITS (fndecl))
      && !flag_instrument_functions_exclude_p (fndecl))
    {
      tree x;
      gbind *new_bind;
      gimple *tf;
      gimple_seq cleanup = NULL, body = NULL;
      tree tmp_var, this_fn_addr;
      gcall *call;

      /* The instrumentation hooks aren't going to call the instrumented
	 function and the address they receive is expected to be matchable
	 against symbol addresses.  Make sure we don't create a trampoline,
	 in case the current function is nested.  */
      this_fn_addr = build_fold_addr_expr (current_function_decl);
      TREE_NO_TRAMPOLINE (this_fn_addr) = 1;

      /* Cleanup sequence: capture the return address, then call the
	 profiling exit hook; run as the FINALLY part on every exit.  */
      x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
      call = gimple_build_call (x, 1, integer_zero_node);
      tmp_var = create_tmp_var (ptr_type_node, "return_addr");
      gimple_call_set_lhs (call, tmp_var);
      gimplify_seq_add_stmt (&cleanup, call);
      x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_EXIT);
      call = gimple_build_call (x, 2, this_fn_addr, tmp_var);
      gimplify_seq_add_stmt (&cleanup, call);
      tf = gimple_build_try (seq, cleanup, GIMPLE_TRY_FINALLY);

      /* Body: capture the return address, call the profiling entry
	 hook, then run the try/finally wrapping the original body.  */
      x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
      call = gimple_build_call (x, 1, integer_zero_node);
      tmp_var = create_tmp_var (ptr_type_node, "return_addr");
      gimple_call_set_lhs (call, tmp_var);
      gimplify_seq_add_stmt (&body, call);
      x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_ENTER);
      call = gimple_build_call (x, 2, this_fn_addr, tmp_var);
      gimplify_seq_add_stmt (&body, call);
      gimplify_seq_add_stmt (&body, tf);
      new_bind = gimple_build_bind (NULL, body, NULL);

      /* Replace the current function body with the body
	 wrapped in the try/finally TF.  */
      seq = NULL;
      gimple_seq_add_stmt (&seq, new_bind);
      gimple_set_body (fndecl, seq);
      bind = new_bind;
    }

  if (sanitize_flags_p (SANITIZE_THREAD))
    {
      /* For TSan, wrap the whole body in a try/finally whose cleanup
	 calls the internal function-exit hook on every exit path.  */
      gcall *call = gimple_build_call_internal (IFN_TSAN_FUNC_EXIT, 0);
      gimple *tf = gimple_build_try (seq, call, GIMPLE_TRY_FINALLY);
      gbind *new_bind = gimple_build_bind (NULL, tf, NULL);
      /* Replace the current function body with the body
	 wrapped in the try/finally TF.  */
      seq = NULL;
      gimple_seq_add_stmt (&seq, new_bind);
      gimple_set_body (fndecl, seq);
    }

  DECL_SAVED_TREE (fndecl) = NULL_TREE;
  cfun->curr_properties |= PROP_gimple_any;

  pop_cfun ();

  dump_function (TDI_gimple, fndecl);
}
/* Build a placeholder expression of type TYPE so that processing can
   keep going after an error has been reported.  */
static tree
dummy_object (tree type)
{
  tree null_ptr = build_int_cst (build_pointer_type (type), 0);
  return build2 (MEM_REF, type, null_ptr, null_ptr);
}
/* Gimplify __builtin_va_arg, aka VA_ARG_EXPR, which is not really a
builtin function, but a very special sort of operator. */
enum gimplify_status
gimplify_va_arg_expr (tree *expr_p, gimple_seq *pre_p,
gimple_seq *post_p ATTRIBUTE_UNUSED)
{
tree promoted_type, have_va_type;
tree valist = TREE_OPERAND (*expr_p, 0);
tree type = TREE_TYPE (*expr_p);
tree t, tag, aptag;
location_t loc = EXPR_LOCATION (*expr_p);
/* Verify that valist is of the proper type. */
have_va_type = TREE_TYPE (valist);
if (have_va_type == error_mark_node)
return GS_ERROR;
have_va_type = targetm.canonical_va_list_type (have_va_type);
if (have_va_type == NULL_TREE
&& POINTER_TYPE_P (TREE_TYPE (valist)))
/* Handle 'Case 1: Not an array type' from c-common.c/build_va_arg. */
have_va_type
= targetm.canonical_va_list_type (TREE_TYPE (TREE_TYPE (valist)));
gcc_assert (have_va_type != NULL_TREE);
/* Generate a diagnostic for requesting data of a type that cannot
be passed through `...' due to type promotion at the call site. */
if ((promoted_type = lang_hooks.types.type_promotes_to (type))
!= type)
{
static bool gave_help;
bool warned;
/* Use the expansion point to handle cases such as passing bool (defined
in a system header) through `...'. */
location_t xloc
= expansion_point_location_if_in_system_header (loc);
/* Unfortunately, this is merely undefined, rather than a constraint
violation, so we cannot make this an error. If this call is never
executed, the program is still strictly conforming. */
auto_diagnostic_group d;
warned = warning_at (xloc, 0,
"%qT is promoted to %qT when passed through %<...%>",
type, promoted_type);
/* The follow-up hint is shown only once per compilation.  */
if (!gave_help && warned)
{
gave_help = true;
inform (xloc, "(so you should pass %qT not %qT to %<va_arg%>)",
promoted_type, type);
}
/* We can, however, treat "undefined" any way we please.
Call abort to encourage the user to fix the program. */
if (warned)
inform (xloc, "if this code is reached, the program will abort");
/* Before the abort, allow the evaluation of the va_list
expression to exit or longjmp. */
gimplify_and_add (valist, pre_p);
t = build_call_expr_loc (loc,
builtin_decl_implicit (BUILT_IN_TRAP), 0);
gimplify_and_add (t, pre_p);
/* This is dead code, but go ahead and finish so that the
mode of the result comes out right. */
*expr_p = dummy_object (type);
return GS_ALL_DONE;
}
/* Valid case: lower VA_ARG_EXPR to the internal IFN_VA_ARG call, to be
expanded later.  TAG carries the requested type, APTAG the va_list
type, both encoded as null pointer constants of the right type.  */
tag = build_int_cst (build_pointer_type (type), 0);
aptag = build_int_cst (TREE_TYPE (valist), 0);
*expr_p = build_call_expr_internal_loc (loc, IFN_VA_ARG, type, 3,
valist, tag, aptag);
/* Clear the tentatively set PROP_gimple_lva, to indicate that IFN_VA_ARG
needs to be expanded. */
cfun->curr_properties &= ~PROP_gimple_lva;
return GS_OK;
}
/* Append a new GIMPLE_ASSIGN "DST = SRC" to the end of *SEQ_P and return
   the statement that was added last.  DST and SRC may be ungimplified
   trees; they are converted to gimple operands as needed.  The temporary
   MODIFY_EXPR node is released once it has been gimplified.  */
gimple *
gimplify_assign (tree dst, tree src, gimple_seq *seq_p)
{
  tree assign = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
  gimplify_and_add (assign, seq_p);
  ggc_free (assign);
  return gimple_seq_last_stmt (*seq_p);
}
/* Hash an entry of the gimplify temporary table by its saved tree value.  */
inline hashval_t
gimplify_hasher::hash (const elt_t *p)
{
  return iterative_hash_expr (p->val, 0);
}
/* Equality predicate for the gimplify temporary table: two entries match
   when their saved trees have the same code, the same type, and equal
   operands.  */
inline bool
gimplify_hasher::equal (const elt_t *p1, const elt_t *p2)
{
tree t1 = p1->val;
tree t2 = p2->val;
enum tree_code code = TREE_CODE (t1);
if (TREE_CODE (t2) != code
|| TREE_TYPE (t1) != TREE_TYPE (t2))
return false;
if (!operand_equal_p (t1, t2, 0))
return false;
/* Only allow them to compare equal if they also hash equal; otherwise
results are nondeterminate, and we fail bootstrap comparison. */
gcc_checking_assert (hash (p1) == hash (p2));
return true;
}
|
/* Sum B[i] + C[i] over the first N elements, offloaded with an OpenMP
   target teams reduction.  Returns 0.0 when N <= 0.

   Fix: map(to:B,C) mapped only the pointer VARIABLES, not the arrays
   they point to, so on a discrete device B[i]/C[i] touched unmapped
   memory.  Explicit array sections map the actual data.  */
double foo(int N, double *B, double *C)
{
    double sum = 0.0;
    #pragma omp target map(to:B[0:N],C[0:N]) map(tofrom:sum)
    #pragma omp teams distribute parallel for reduction(+:sum)
    for (int i = 0; i < N; i++) {
        sum += B[i] + C[i];
    }
    return sum;
}
|
k-means.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <time.h>
#define MAX_CHAR_PER_LINE 128
/* Read a whitespace/comma-delimited data file into a 2-D float array.
   The first token of each line is an image ID and is skipped; the rest
   are features.  On success *numImg/*numFea hold the row/feature counts
   and the caller owns the result (free(images[0]); free(images)).
   Returns NULL if the file cannot be opened or contains no data.

   Fixes vs. original: removed the unused `numBytesRead`; guard against
   short lines so atof() is never called on a NULL strtok() result;
   bound the row counter by *numImg; bail out on an empty file instead
   of writing through images[0] of a zero-length allocation.  */
float** read_file(char *filename, int *numImg, int *numFea)
{
    float **images;
    int i, j, len;
    FILE *infile;
    char *line, *ret, *token;
    int lenghtline;

    if ((infile = fopen(filename, "r")) == NULL) {
        fprintf(stderr, "Error: file is not present (%s)\n", filename);
        return NULL;
    }

    /* Start with one line's worth of buffer; grow whenever fgets fills
       it completely (i.e. the line was truncated before its newline). */
    lenghtline = MAX_CHAR_PER_LINE;
    line = (char*) malloc(lenghtline);
    assert(line != NULL);

    /* Pass 1: count the images (non-blank lines). */
    (*numImg) = 0;
    while (fgets(line, lenghtline, infile) != NULL) {
        while (strlen(line) == (size_t)(lenghtline - 1)) {
            /* Truncated: rewind over the partial read, enlarge the
               buffer, and re-read the whole line. */
            len = (int) strlen(line);
            fseek(infile, -len, SEEK_CUR);
            lenghtline += MAX_CHAR_PER_LINE;
            line = (char*) realloc(line, lenghtline);
            assert(line != NULL);
            ret = fgets(line, lenghtline, infile);
            assert(ret != NULL);
        }
        if (strtok(line, " \t\n") != NULL)
            (*numImg)++;
    }
    rewind(infile);

    /* Pass 2: count the features on the first non-blank line (the first
       token is the image ID, not a feature). */
    (*numFea) = 0;
    while (fgets(line, lenghtline, infile) != NULL) {
        if (strtok(line, " \t\n") != NULL) {
            while (strtok(NULL, " ,\t\n") != NULL)
                (*numFea)++;
            break;
        }
    }
    rewind(infile);

    if ((*numImg) == 0 || (*numFea) == 0) {
        /* Empty or header-less file: previously this fell through and
           wrote through a zero-length allocation. */
        fprintf(stderr, "Error: no data found in (%s)\n", filename);
        fclose(infile);
        free(line);
        return NULL;
    }

    /* One contiguous allocation backs all rows (cache friendly, and
       freed with a single free(images[0])). */
    len = (*numImg) * (*numFea);
    images = (float**) malloc((*numImg) * sizeof(float*));
    assert(images != NULL);
    images[0] = (float*) malloc(len * sizeof(float));
    assert(images[0] != NULL);
    for (i = 1; i < (*numImg); i++)
        images[i] = images[i-1] + (*numFea);

    /* Pass 3: parse the values.  Stop a row early if it runs out of
       tokens rather than dereferencing a NULL strtok() result. */
    i = 0;
    while (i < (*numImg) && fgets(line, lenghtline, infile) != NULL) {
        if (strtok(line, " \t\n") == NULL) continue;
        for (j = 0; j < (*numFea); j++) {
            token = strtok(NULL, " ,\t\n");
            if (token == NULL) break;
            images[i][j] = (float) atof(token);
        }
        i++;
    }
    fclose(infile);
    free(line);
    return images;
}
/* Write clustering results: "<filename>.cluster_centres" receives the
   final centroid coordinates, "<filename>.cluster_id" the image-to-
   cluster assignment table.  Returns 1 on success, 0 if an output file
   cannot be created (previously the fopen results were unchecked and a
   failure crashed on fprintf; sprintf was also unbounded).  */
int output(char *filename, int numClusters, int numImg, int numFea, float **clusters, int *cluster_id)
{
    FILE *fptr;
    int i, j;
    char outputfile[1024];

    snprintf(outputfile, sizeof(outputfile), "%s.cluster_centres", filename);
    fptr = fopen(outputfile, "w");
    if (fptr == NULL) {
        fprintf(stderr, "Error: cannot create (%s)\n", outputfile);
        return 0;
    }
    fprintf(fptr, "=============The final cluster centroid location==============\n");
    for (i = 0; i < numClusters; i++) {
        fprintf(fptr, "%d ", i);
        for (j = 0; j < numFea; j++)
            fprintf(fptr, "%f ", clusters[i][j]); /* final centroid location */
        fprintf(fptr, "\n");
    }
    fclose(fptr);

    snprintf(outputfile, sizeof(outputfile), "%s.cluster_id", filename);
    fptr = fopen(outputfile, "w");
    if (fptr == NULL) {
        fprintf(stderr, "Error: cannot create (%s)\n", outputfile);
        return 0;
    }
    fprintf(fptr, "|IMAGE_ID | CLUSTER_ID|\n");
    for (i = 0; i < numImg; i++)
        fprintf(fptr, "|%d\t |%d\t |\n", i, cluster_id[i]); /* image -> cluster */
    fclose(fptr);
    return 1;
}
/* Squared Euclidean distance between two ele-length float vectors
   (no square root: only relative order matters for cluster search). */
static float euclidean_distance(int ele, float *image, float *clust)
{
    float acc = 0.0f;
    for (int d = 0; d < ele; d++) {
        float diff = image[d] - clust[d];
        acc += diff * diff;
    }
    return acc;
}
/* Return the index of the centroid nearest to OBJECT under squared
   Euclidean distance.  Ties keep the lowest index (strict < compare). */
static int cluster_index(int numClusters, int numFea, float *object, float **clusters)
{
    int best = 0;
    float best_dist = euclidean_distance(numFea, object, clusters[0]);
    for (int c = 1; c < numClusters; c++) {
        float d = euclidean_distance(numFea, object, clusters[c]);
        if (d < best_dist) {
            best_dist = d;
            best = c;
        }
    }
    return best;
}
/* Parallel (OpenMP) Lloyd's k-means.  Each thread accumulates per-thread
   partial sums/counts (local_newClusters / local_newClusterSize) which are
   merged serially after the parallel region, avoiding atomics on the shared
   accumulators.  Iterates until the fraction of reassigned images drops to
   <= threshold or 500 iterations pass.  Returns the (caller-freed) centroid
   array; cluster_id[i] receives the final cluster of image i. */
float** kmeans_omp(float **images,int numFea,int numImg,int numClusters,float threshold,int *cluster_id)
{
int i, j, k, index, loop=0;
int *newClusterSize; // number of elements in the new clusters
float change; // specifies the number of object that changed their cluster
float **clusters; // this will be the cluster centroid
float **newClusters; // this will be the cluster centroid new location after finding out mean of the elements in that cluster
double timing; // NOTE(review): declared but never used
int nthreads; // specifies the number of thread
int **local_newClusterSize; // [thread][cluster] per-thread member counts
float ***local_newClusters; // [thread][cluster][feature] per-thread sums
nthreads = omp_get_max_threads();
clusters = (float**) malloc(numClusters * sizeof(float*)); // allocate the memory for the cluster centroid based upon the number of cluster
assert(clusters != NULL);
clusters[0] = (float*) malloc(numClusters * numFea * sizeof(float));// for each cluster allocate the memory based upon the number of cluster and the number of points in the images
assert(clusters[0] != NULL);
for (i=1; i<numClusters; i++)
clusters[i] = clusters[i-1] + numFea;
for (i=0; i<numClusters; i++)
for (j=0; j<numFea; j++)
clusters[i][j] = images[i][j]; // The first K number of the images will be selected as the centroid of the cluster
for (i=0; i<numImg; i++) cluster_id[i] = -1;// initially all the images cluster_id will be set to -1
newClusterSize = (int*) calloc(numClusters, sizeof(int));
assert(newClusterSize != NULL);
newClusters = (float**) malloc(numClusters * sizeof(float*));// allocate the memory for the new cluster based upon the number of cluster
assert(newClusters != NULL);
newClusters[0] = (float*) calloc(numClusters * numFea, sizeof(float));// allocate the memory for the each new cluster formed based upon the number of cluster and the number of points in the images
assert(newClusters[0] != NULL);
for (i=1; i<numClusters; i++)
newClusters[i] = newClusters[i-1] + numFea;
local_newClusterSize = (int**) malloc(nthreads * sizeof(int*));// allocate the memory for the local_newclustersize variable
assert(local_newClusterSize != NULL);
local_newClusterSize[0] = (int*) calloc(nthreads*numClusters,
sizeof(int));
assert(local_newClusterSize[0] != NULL);
for (i=1; i<nthreads; i++)
local_newClusterSize[i] = local_newClusterSize[i-1]+numClusters;
local_newClusters =(float***)malloc(nthreads * sizeof(float**)); // allocate the memory for the local_newcluster variable
assert(local_newClusters != NULL);
local_newClusters[0] =(float**) malloc(nthreads * numClusters *
sizeof(float*));
assert(local_newClusters[0] != NULL);
for (i=1; i<nthreads; i++)
local_newClusters[i] = local_newClusters[i-1] + numClusters;
for (i=0; i<nthreads; i++)
{
for (j=0; j<numClusters; j++)
{
local_newClusters[i][j] = (float*)calloc(numFea,
sizeof(float));
assert(local_newClusters[i][j] != NULL);
}
}
do {
change = 0.0;
// Assignment step: each thread classifies a static slice of the images
// and accumulates into its own private buffers (indexed by tid).
#pragma omp parallel \
shared(images,clusters,cluster_id,local_newClusters,local_newClusterSize)
{
int tid = omp_get_thread_num();
#pragma omp for \
private(i,j,index) \
firstprivate(numImg,numClusters,numFea) \
schedule(static) \
reduction(+:change)
for (i=0; i<numImg; i++) // parallel the for loop
{
index = cluster_index(numClusters, numFea,images[i], clusters);// call the index function to get index for each images
if (cluster_id[i] != index) change += 1.0; // increment the change value when the images changes its membership
cluster_id[i] = index; // The cluster_id[image_id] is equal to the index
local_newClusterSize[tid][index]++; // increment the local_newClusterSize if the data-point falls in that cluster
for (j=0; j<numFea; j++)
local_newClusters[tid][index][j] += images[i][j]; // add the data-point to the local_newClusters of that particular index
}
}
// Serial merge of the per-thread partials; buffers are zeroed in place
// so they are ready for the next iteration.
for (i=0; i<numClusters; i++)
{
for (j=0; j<nthreads; j++)
{
newClusterSize[i] += local_newClusterSize[j][i];// assign the value of the local_newClusterSize to the newClusterSize
local_newClusterSize[j][i] = 0.0;
for (k=0; k<numFea; k++)
{
newClusters[i][k] += local_newClusters[j][i][k]; // assign the value present in the local_newClusters to the newClusters
local_newClusters[j][i][k] = 0.0;
}
}
}
// Update step: move each centroid to the mean of its members.
// NOTE(review): "> 1" leaves the centroid of a singleton cluster
// unchanged; the textbook rule is "> 0" -- confirm this is intended.
for (i=0; i<numClusters; i++) {
for (j=0; j<numFea; j++) {
if (newClusterSize[i] > 1)
clusters[i][j] = newClusters[i][j] / newClusterSize[i];// calculate the mean to get the new cluster centroid location
newClusters[i][j] = 0.0;
}
newClusterSize[i] = 0;
}
change /= numImg; // fraction of images that switched clusters
} while (change > threshold && loop++ < 500); // check the condition
free(local_newClusterSize[0]);
free(local_newClusterSize);
for (i=0; i<nthreads; i++)
for (j=0; j<numClusters; j++)
free(local_newClusters[i][j]);
free(local_newClusters[0]);
free(local_newClusters);
free(newClusters[0]);
free(newClusters);
free(newClusterSize);
return clusters;
}
/* Driver: read the data file named by argv[1], ask the user for a
   cluster count, run parallel k-means, and write the result files.

   Fixes vs. original: argv[1] was read without checking argc (crash
   when run with no argument); scanf's result was unchecked; invalid
   input only printed a usage message and then carried on into the run.
   Unused locals (i, j, output_timing) removed.  */
int main(int argc, char **argv) {
    int nthreads;
    int numClusters, numFea, numImg;
    int *cluster_id;
    char *filename;
    float **images;
    float **clusters;
    float threshold;

    /* defaults */
    nthreads = 4;
    threshold = 0.001;
    numClusters = 0;

    if (argc < 2) {
        printf("please enter in this format 'seq color.txt'\n ");
        return 1;
    }
    filename = argv[1]; /* get the filename */

    printf("Enter the number of clusters\n");
    if (scanf("%d", &numClusters) != 1 || numClusters <= 1) {
        printf("please enter in this format 'seq color.txt'\n ");
        return 1;
    }
    printf("The filename you have entered = %s\n", filename);
    printf("The number of cluster you have entered = %d\n", numClusters);

    if (nthreads > 0)
        omp_set_num_threads(nthreads); /* allocate the threads */

    /* NOTE: clock() sums CPU time over all threads, so the reported
       "timing" grows with the thread count rather than wall time. */
    clock_t begin = clock();
    images = read_file(filename, &numImg, &numFea);
    if (images == NULL) exit(1);
    cluster_id = (int*) malloc(numImg * sizeof(int)); /* image -> cluster map */
    assert(cluster_id != NULL);
    clusters = kmeans_omp(images, numFea, numImg, numClusters, threshold, cluster_id);
    free(images[0]);
    free(images);
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;

    output(filename, numClusters, numImg, numFea, clusters, cluster_id);
    free(cluster_id);
    free(clusters[0]);
    free(clusters);
    printf("========The parallel Implementation of the K-Means=======\n");
    printf("Computation timing = %10.4f sec\n", time_spent);
    return 0;
}
5_for-par0.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
/* Allocate an int vector of SIZE elements into *v and fill it with
   pseudo-random values in [0, 9999].  Exits on allocation failure
   (previously the malloc result was used unchecked). */
void inicializa(int **v, int size) {
    *v = (int *) malloc(sizeof(int) * size);
    if (*v == NULL) {
        fprintf(stderr, "inicializa: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < size; i++) {
        (*v)[i] = rand() % 10000;
    }
}
float square(int x){
int k = 0;
while(k < 5000) k++;
return sqrt(x);
}
/* SPMD demo: fill a vector with random ints, then let each OpenMP
   thread process its own contiguous chunk.  Fix: the vector was never
   freed (leak); also note the float->int truncation on store. */
int main(int argc, char **argv) {
    srand(time(NULL));
    int *vetor;
    int size = 1000000;
    inicializa(&vetor, size);
    #pragma omp parallel
    {
        /* Manual block partition: each thread owns one contiguous chunk;
           the last thread also absorbs the size % nthreads remainder. */
        int chunk = size / omp_get_num_threads();
        int local_init = omp_get_thread_num() * chunk;
        int local_end = (omp_get_thread_num() + 1) * chunk;
        if ((omp_get_num_threads() - 1) == omp_get_thread_num())
            local_end = size;
        /* Each thread touches only its own block of memory. */
        for (int i = local_init; i < local_end; i++) {
            vetor[i] = square(vetor[i]); /* float result truncated to int */
        }
    }
    free(vetor); /* was leaked */
    return 0;
}
|
zherk.c | #include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "zherk.fatbin.c"
/* Smaller of two size_t values. */
static inline size_t min(size_t a, size_t b) { if (a < b) return a; return b; }
/* Larger of two size_t values. */
static inline size_t max(size_t a, size_t b) { if (a > b) return a; return b; }
/* Asynchronously copy an m x n sub-matrix (element size elemSize) from
   host matrix B (leading dim ldb, origin (bi,bj)) into device matrix A
   (leading dim lda, origin (ai,aj)) on STREAM.
   Fix: the argument to cuMemcpy2DAsync had been mangled into the HTML
   entity "&copy;" by a bad conversion; restored to &copy. */
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           const void * B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Asynchronously copy an m x n sub-matrix (element size elemSize) from
   device matrix B (leading dim ldb, origin (bi,bj)) into host matrix A
   (leading dim lda, origin (ai,aj)) on STREAM.
   Fix: the argument to cuMemcpy2DAsync had been mangled into the HTML
   entity "&copy;" by a bad conversion; restored to &copy. */
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Real and complex constants used for the alpha/beta special-case tests.  */
static const double zero = 0.0;
static const double one = 1.0;
static const double complex czero = 0.0 + 0.0 * I;
/* Host (CPU) Hermitian rank-k update:
     C := alpha*A*A^H + beta*C   (trans == CBlasNoTrans, A is n x k)
     C := alpha*A^H*A + beta*C   (otherwise, A is k x n)
   Only the triangle of C selected by UPLO is referenced/updated; the
   diagonal of C is kept real (creal), as the Hermitian property requires.
   CBlasTrans (plain transpose) is rejected: only NoTrans/ConjTrans make
   sense for a Hermitian update.  alpha and beta are real scalars. */
void zherk(CBlasUplo uplo, CBlasTranspose trans,
size_t n, size_t k,
double alpha, const double complex * restrict A, size_t lda,
double beta, double complex * restrict C, size_t ldc) {
const size_t nRowA = (trans == CBlasNoTrans) ? n : k;
/* Argument validation, LAPACK-style: info is the 1-based index of the
   first bad argument. */
int info = 0;
if (trans == CBlasTrans)
info = 2;
else if (lda < nRowA)
info = 7;
else if (ldc < n)
info = 10;
if (info != 0) {
XERBLA(info);
return;
}
/* Quick return when the update is a no-op. */
if (n == 0 || ((alpha == zero || k == 0) && beta == one))
return;
/* alpha == 0: the update reduces to scaling (or zeroing) C's triangle.
   The diagonal is forced real via creal in the beta-scaling case. */
if (alpha == zero) {
if (uplo == CBlasUpper) {
if (beta == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i <= j; i++)
C[j * ldc + i] = zero;
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < j; i++)
C[j * ldc + i] *= beta;
C[j * ldc + j] = beta * creal(C[j * ldc + j]);
}
}
}
else {
if (beta == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = j; i < n; i++)
C[j * ldc + i] = zero;
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
C[j * ldc + j] = beta * creal(C[j * ldc + j]);
for (size_t i = j + 1; i < n; i++)
C[j * ldc + i] *= beta;
}
}
}
return;
}
/* NoTrans: C += alpha * A * A^H, accumulated column-by-column of C;
   columns are independent, so the j loop parallelizes cleanly. */
if (trans == CBlasNoTrans) {
if (uplo == CBlasUpper) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
/* First apply beta to column j of the triangle... */
if (beta == zero) {
for (size_t i = 0; i <= j; i++)
C[j * ldc + i] = zero;
}
else if (beta != one) {
for (size_t i = 0; i < j; i++)
C[j * ldc + i] *= beta;
C[j * ldc + j] = beta * creal(C[j * ldc + j]);
}
else
C[j * ldc + j] = creal(C[j * ldc + j]);
/* ...then accumulate alpha * A(:,l) * conj(A(j,l)) for each l. */
for (size_t l = 0; l < k; l++) {
if (A[l * lda + j] != zero) {
register double complex temp = alpha * conj(A[l * lda + j]);
for (size_t i = 0; i < j; i++)
C[j * ldc + i] += temp * A[l * lda + i];
C[j * ldc + j] = creal(C[j * ldc + j]) + creal(temp * A[l * lda + j]);
}
}
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
if (beta == zero) {
for (size_t i = j; i < n; i++)
C[j * ldc + i] = zero;
}
else if (beta != one) {
C[j * ldc + j] = beta * creal(C[j * ldc + j]);
for (size_t i = j + 1; i < n; i++)
C[j * ldc + i] *= beta;
}
else
C[j * ldc + j] = creal(C[j * ldc + j]);
for (size_t l = 0; l < k; l++) {
if (A[l * lda + j] != zero) {
register double complex temp = alpha * conj(A[l * lda + j]);
C[j * ldc + j] = creal(C[j * ldc + j]) + creal(temp * A[l * lda + j]);
for (size_t i = j + 1; i < n; i++)
C[j * ldc + i] += temp * A[l * lda + i];
}
}
}
}
}
/* ConjTrans: C += alpha * A^H * A; each C(i,j) is a dot product of
   columns i and j of A.  The diagonal uses a real accumulator. */
else {
if (uplo == CBlasUpper) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < j; i++) {
register double complex temp = czero;
for (size_t l = 0; l < k; l++)
temp += conj(A[i * lda + l]) * A[j * lda + l];
if (beta == zero)
C[j * ldc + i] = alpha * temp;
else
C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i];
}
register double rtemp = zero;
for (size_t l = 0; l < k; l++)
rtemp += conj(A[j * lda + l]) * A[j * lda + l];
if (beta == zero)
C[j * ldc + j] = alpha * rtemp;
else
C[j * ldc + j] = alpha * rtemp + beta * creal(C[j * ldc + j]);
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
register double rtemp = zero;
for (size_t l = 0; l < k; l++)
rtemp += conj(A[j * lda + l]) * A[j * lda + l];
if (beta == zero)
C[j * ldc + j] = alpha * rtemp;
else
C[j * ldc + j] = alpha * rtemp + beta * creal(C[j * ldc + j]);
for (size_t i = j + 1; i < n; i++) {
register double complex temp = czero;
for (size_t l = 0; l < k; l++)
temp += conj(A[i * lda + l]) * A[j * lda + l];
if (beta == zero)
C[j * ldc + i] = alpha * temp;
else
C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i];
}
}
}
}
}
/* Single-GPU ZHERK: launch the templated CUDA kernel for
   C := alpha*A*A^H + beta*C (NoTrans) or alpha*A^H*A + beta*C.
   A and C are device pointers.  The kernel module is lazily loaded into
   HANDLE on first use, and the right template instantiation is located
   by building its C++-mangled name from the blocking parameters. */
CUresult cuZherk(CUBLAShandle handle, CBlasUplo uplo, CBlasTranspose trans,
size_t n, size_t k,
double alpha, CUdeviceptr A, size_t lda,
double beta, CUdeviceptr C, size_t ldc, CUstream stream) {
const size_t nRowA = (trans == CBlasNoTrans) ? n : k;
/* Same argument validation as the host zherk. */
int info = 0;
if (trans == CBlasTrans)
info = 2;
else if (lda < nRowA)
info = 7;
else if (ldc < n)
info = 10;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (n == 0 || ((alpha == zero || k == 0) && beta == one))
return CUDA_SUCCESS;
CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));
/* Lazy-load the fatbin kernel module on first call. */
if (handle->zherk == NULL)
CU_ERROR_CHECK(cuModuleLoadData(&handle->zherk, imageBytes));
/* Blocking parameters baked into the kernel template (mb x nb tile of C
   per block, kb panel depth, bx x by threads). */
const unsigned int mb = (trans == CBlasNoTrans) ? 64 : 8;
const unsigned int nb = (trans == CBlasNoTrans) ? 4 : 8;
const unsigned int kb = (trans == CBlasNoTrans) ? 16 : 4;
const unsigned int bx = (trans == CBlasNoTrans) ? 4 : 4;
const unsigned int by = (trans == CBlasNoTrans) ? 16 : 8;
/* Reconstruct the mangled name of the template instantiation. */
char name[71];
snprintf(name, 71,
"_Z6zherk%cIL9CBlasUplo%dELj%uELj%uELj%uELj%uELj%uEEvPK7double2PS1_ddiiii",
trans, uplo, mb, nb, kb, bx, by);
CUfunction function;
CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->zherk, name));
void * params[] = { &A, &C, &alpha, &beta, &lda, &ldc, &n, &k };
CU_ERROR_CHECK(cuLaunchKernel(function, (unsigned int)(n + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1,
bx, by, 1, 0, stream, params, NULL));
CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));
return CUDA_SUCCESS;
}
/* Multi-GPU ZHERK on host data: the off-diagonal panels of C are updated
   via cuMultiGPUZgemm (distributed GEMM), while the diagonal blocks --
   which need the Hermitian-aware update -- are done on the CPU with the
   host zherk.  Falls back entirely to host zherk for small n. */
CUresult cuMultiGPUZherk(CUmultiGPUBLAShandle handle,
CBlasUplo uplo, CBlasTranspose trans,
size_t n, size_t k,
double alpha, const double complex * restrict A, size_t lda,
double beta, double complex * restrict C, size_t ldc) {
const size_t nRowA = (trans == CBlasNoTrans) ? n : k;
/* Same argument validation as the host zherk. */
int info = 0;
if (trans == CBlasTrans)
info = 2;
else if (lda < nRowA)
info = 7;
else if (ldc < n)
info = 10;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (n == 0 || ((alpha == zero || k == 0) && beta == one))
return CUDA_SUCCESS;
/* alpha == 0: pure scaling of C's triangle, done on the host.
   NOTE(review): unlike zherk above, the beta != 0 branches here scale
   the diagonal with `*= beta` / include it in `i <= j` without taking
   creal() -- confirm the diagonal is intended to stay real here. */
if (alpha == zero) {
if (uplo == CBlasUpper) {
if (beta == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i <= j; i++)
C[j * ldc + i] = zero;
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i <= j; i++)
C[j * ldc + i] *= beta;
}
}
}
else {
if (beta == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = j; i < n; i++)
C[j * ldc + i] = zero;
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = j; i < n; i++)
C[j * ldc + i] *= beta;
}
}
}
return CUDA_SUCCESS;
}
/* Panel width for the block-column decomposition. */
const size_t nb = (trans == CBlasNoTrans) ? ZGEMM_N_MB : ZGEMM_CN_NB;
if (n < nb) {
zherk(uplo, trans, n, k, alpha, A, lda, beta, C, ldc);
return CUDA_SUCCESS;
}
/* Off-diagonal panels via distributed ZGEMM, then the nb x nb diagonal
   blocks via host zherk. */
if (trans == CBlasNoTrans) {
if (uplo == CBlasUpper) {
for (size_t j = nb; j < n; j += nb)
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasConjTrans, j, min(n - j, nb), k, alpha, A, lda, &A[j], lda, beta, &C[j * ldc], ldc));
}
else {
const size_t m = n - nb;
for (size_t j = 0; j < m; j += nb) {
const size_t jb = min(n - j, nb);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasConjTrans, n - j - jb, jb, k, alpha, &A[j + jb], lda, &A[j], lda, beta, &C[j * ldc + j + jb], ldc));
}
}
for (size_t j = 0; j < n; j += nb)
zherk(uplo, trans, min(n - j, nb), k, alpha, &A[j], lda, beta, &C[j * ldc + j], ldc);
}
else {
if (uplo == CBlasUpper) {
for (size_t j = nb; j < n; j += nb)
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasConjTrans, CBlasNoTrans, j, min(n - j, nb), k, alpha, A, lda, &A[j * lda], lda, beta, &C[j * ldc], ldc));
}
else {
const size_t m = n - nb;
for (size_t j = 0; j < m; j += nb) {
const size_t jb = min(n - j, nb);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasConjTrans, CBlasNoTrans, n - j - jb, jb, k, alpha, &A[(j + jb) * lda], lda, &A[j * lda], lda, beta, &C[j * ldc + j + jb], ldc));
}
}
for (size_t j = 0; j < n; j += nb)
zherk(uplo, trans, min(n - j, nb), k, alpha, &A[j * lda], lda, beta, &C[j * ldc + j], ldc);
}
return CUDA_SUCCESS;
}
|
GB_cast_array.c | //------------------------------------------------------------------------------
// GB_cast_array: typecast an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Casts an input array Ax to an output array Cx with a different type. The
// two types are always different, so this does not need to handle user-defined
// types. The iso case is not handled; Ax and Cx must be the same size and no
// iso expansion is done.
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_unop__include.h"
#endif
GB_PUBLIC
void GB_cast_array // typecast an array
(
GB_void *Cx, // output array
const GB_Type_code code1, // type code for Cx
GB_void *Ax, // input array
const GB_Type_code code2, // type code for Ax
const int8_t *restrict Ab, // bitmap for Ax (NULL if not bitmap)
const int64_t anz, // number of entries in Cx and Ax
const int nthreads // number of threads to use
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
if (anz == 0 || Cx == Ax)
{
// if anz is zero: no work to do, and the Ax and Cx pointer may be NULL
// as well. If Cx and Ax are aliased, then no copy is needed.
return ;
}
ASSERT (Cx != NULL) ;
ASSERT (Ax != NULL) ;
ASSERT (anz > 0) ;
ASSERT (GB_code_compatible (code1, code2)) ;
ASSERT (code1 != code2) ;
ASSERT (code1 != GB_UDT_code) ;
//--------------------------------------------------------------------------
// typecast the array
//--------------------------------------------------------------------------
#ifndef GBCOMPACT
//----------------------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------------------
// Each worker invokes the pre-generated identity unary op for the
// (code1, code2) type pair and returns on success; on failure it
// breaks out to the generic worker below.
#define GB_unop_apply(zname,xname) \
GB (_unop_apply__identity ## zname ## xname)
#define GB_WORKER(ignore1,zname,ztype,xname,xtype) \
{ \
GrB_Info info = GB_unop_apply (zname,xname) \
((ztype *) Cx, (xtype *) Ax, Ab, anz, nthreads) ; \
if (info == GrB_SUCCESS) return ; \
} \
break ;
//----------------------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------------------
// GB_2type_factory.c expands into a switch over (code1, code2) that
// calls GB_WORKER for every supported non-identical type pair.
#define GB_EXCLUDE_SAME_TYPES
#include "GB_2type_factory.c"
#endif
//--------------------------------------------------------------------------
// generic worker: only used for GBCOMPACT case
//--------------------------------------------------------------------------
// Fallback path: look up a runtime cast function and apply it entry by
// entry, skipping entries not present in the bitmap Ab.
int64_t csize = GB_code_size (code1, 0) ;
int64_t asize = GB_code_size (code2, 0) ;
GB_cast_function cast_A_to_C = GB_cast_factory (code1, code2) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// Cx [p] = Ax [p]
cast_A_to_C (Cx +(p*csize), Ax +(p*asize), asize) ;
}
}
|
GB_mask_template.c | //------------------------------------------------------------------------------
// GB_mask_template: phase1 and phase2 for R = masker (M, C, Z)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Computes C<M>=Z or C<!M>=Z, returning the result in R. The input matrix C
// is not modified. Effectively, this computes R=C and then R<M>=Z or R<!M>=Z.
// If the C_replace descriptor is enabled, then C has already been cleared, and
// is an empty (but non-NULL) matrix.
// phase1: does not compute R itself, but just counts the # of entries in each
// vector of R. Fine tasks compute the # of entries in their slice of a
// single vector of R, and the results are cumsum'd.
// phase2: computes R, using the counts computed by phase1.
// FUTURE:: add special cases for C==Z, C==M, and Z==M aliases
//------------------------------------------------------------------------------
// R(i,j) = Z(i,j)
//------------------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
#define GB_COPY_Z \
{ \
rjnz++ ; \
}
#else
#define GB_COPY_Z \
{ \
Ri [pR] = i ; \
memcpy (Rx +(pR)*rsize, Zx +(pZ)*rsize, rsize) ; \
pR++ ; \
}
#endif
//------------------------------------------------------------------------------
// R(i,j) = C(i,j)
//------------------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
#define GB_COPY_C \
{ \
rjnz++ ; \
}
#else
#define GB_COPY_C \
{ \
Ri [pR] = i ; \
memcpy (Rx +(pR)*rsize, Cx +(pC)*rsize, rsize) ; \
pR++ ; \
}
#endif
//------------------------------------------------------------------------------
// mask template
//------------------------------------------------------------------------------
{

    //--------------------------------------------------------------------------
    // get C, Z, M, and R
    //--------------------------------------------------------------------------

    // C, Z, M are inputs (not modified); R is the output R = C<M> = Z.
    const int64_t *GB_RESTRICT Cp = C->p ;
    const int64_t *GB_RESTRICT Ci = C->i ;
    const int64_t vlen = C->vlen ;
    const int64_t *GB_RESTRICT Zp = Z->p ;
    const int64_t *GB_RESTRICT Zi = Z->i ;
    const int64_t *GB_RESTRICT Mp = NULL ;
    // const int64_t *GB_RESTRICT Mh = NULL ;
    const int64_t *GB_RESTRICT Mi = NULL ;
    const GB_void *GB_RESTRICT Mx = NULL ;
    size_t msize = 0 ;
    // int64_t Mnvec = 0 ;
    // bool M_is_hyper = false ;
    if (M != NULL)
    {
        Mp = M->p ;
        // Mh = M->h ;
        Mi = M->i ;
        // Mx stays NULL for a structural mask: only the pattern of M is used
        Mx = (Mask_struct ? NULL : (M->x)) ;
        msize = M->type->size ;
        // Mnvec = M->nvec ;
        // M_is_hyper = M->is_hyper ;
    }

    #if defined ( GB_PHASE_2_OF_2 )
    const GB_void *GB_RESTRICT Cx = C->x ;
    const GB_void *GB_RESTRICT Zx = Z->x ;
    const int64_t *GB_RESTRICT Rp = R->p ;
    const int64_t *GB_RESTRICT Rh = R->h ;
    int64_t *GB_RESTRICT Ri = R->i ;
    GB_void *GB_RESTRICT Rx = R->x ;
    size_t rsize = R->type->size ;
    #endif
    // NOTE(review): in phase 1, Rp and Rh are referenced below but declared
    // only under GB_PHASE_2_OF_2 here; they are assumed to be in scope from
    // the file that #includes this template — confirm against the caller.

    //--------------------------------------------------------------------------
    // phase1: count entries in each C(:,j); phase2: compute C
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast = TaskList [taskid].klast ;
        bool fine_task = (klast == -1) ;
        int64_t len ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
            len = TaskList [taskid].len ;
        }
        else
        {
            // a coarse task operates on one or more whole vectors
            len = vlen ;
        }

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of R
            //------------------------------------------------------------------

            int64_t j = (Rh == NULL) ? k : Rh [k] ;

            #if defined ( GB_PHASE_1_OF_2 )
            int64_t rjnz = 0 ;
            #else
            int64_t pR, pR_end ;
            if (fine_task)
            {
                // A fine task computes a slice of R(:,j)
                pR = TaskList [taskid ].pC ;
                pR_end = TaskList [taskid+1].pC ;
                ASSERT (Rp [k] <= pR && pR <= pR_end && pR_end <= Rp [k+1]) ;
            }
            else
            {
                // The vectors of R are never sliced for a coarse task.
                pR = Rp [k] ;
                pR_end = Rp [k+1] ;
            }
            int64_t rjnz = pR_end - pR ;
            if (rjnz == 0) continue ;
            #endif

            //------------------------------------------------------------------
            // get C(:,j)
            //------------------------------------------------------------------

            int64_t pC = -1, pC_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Ci,Cx [pC...pC_end-1], which is
                // a subset of the vector C(:,j)
                pC = TaskList [taskid].pA ;
                pC_end = TaskList [taskid].pA_end ;
            }
            else
            {
                // A coarse task operates on the entire vector C(:,j)
                int64_t kC = (R_to_C == NULL) ? j : R_to_C [k] ;
                if (kC >= 0)
                {
                    pC = Cp [kC] ;
                    pC_end = Cp [kC+1] ;
                }
            }

            int64_t cjnz = pC_end - pC ;      // nnz in C(:,j) for this slice
            bool cdense = (cjnz == len) && (cjnz > 0) ;

            #if defined ( GB_PHASE_2_OF_2 ) || defined ( GB_DEBUG )
            // get the first index in C(:,j) for this vector
            int64_t iC_first = -1 ;
            if (cjnz > 0) iC_first = Ci [pC] ;
            #endif
            #ifdef GB_DEBUG
            int64_t iC_last = -1 ;
            if (cjnz > 0) iC_last = Ci [pC_end-1] ;
            #endif

            //------------------------------------------------------------------
            // get Z(:,j)
            //------------------------------------------------------------------

            int64_t pZ = -1, pZ_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Zi,Zx [pZ...pZ_end-1], which is
                // a subset of the vector Z(:,j)
                pZ = TaskList [taskid].pB ;
                pZ_end = TaskList [taskid].pB_end ;
            }
            else
            {
                // A coarse task operates on the entire vector Z(:,j)
                int64_t kZ = (R_to_Z == NULL) ? j : R_to_Z [k] ;
                if (kZ >= 0)
                {
                    pZ = Zp [kZ] ;
                    pZ_end = Zp [kZ+1] ;
                }
            }

            int64_t zjnz = pZ_end - pZ ;      // nnz in Z(:,j) for this slice
            bool zdense = (zjnz == len) && (zjnz > 0) ;

            #ifdef GB_DEBUG
            int64_t iZ_first = -1, iZ_last = -1 ;
            if (zjnz > 0)
            {
                iZ_first = Zi [pZ] ;
                iZ_last = Zi [pZ_end-1] ;
            }
            #endif

            //------------------------------------------------------------------
            // get M(:,j)
            //------------------------------------------------------------------

            int64_t pM = -1, pM_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Mi,Mx [pM...pM_end-1], which is
                // a subset of the vector M(:,j)
                pM = TaskList [taskid].pM ;
                pM_end = TaskList [taskid].pM_end ;
            }
            else
            {
                // A coarse task operates on the entire vector M (:,j)
                int64_t kM = (R_to_M == NULL) ? j : R_to_M [k] ;
                if (kM >= 0)
                {
                    pM = Mp [kM] ;
                    pM_end = Mp [kM+1] ;
                }
            }

            int64_t mjnz = pM_end - pM ;      // nnz (M (:,j))
            bool mdense = (mjnz == len) && (mjnz > 0) ;

            // get the first index in M(:,j) for this vector
            int64_t iM_first = -1 ;
            int64_t pM_first = pM ;
            if (mjnz > 0) iM_first = Mi [pM_first] ;

            //------------------------------------------------------------------
            // phase1: count nnz (R(:,j)); phase2: compute R(:,j)
            //------------------------------------------------------------------

            if (mjnz == 0)
            {

                //--------------------------------------------------------------
                // M(:,j) is empty
                //--------------------------------------------------------------

                if (!Mask_comp)
                {

                    //----------------------------------------------------------
                    // M(:,j) is empty and not complemented
                    //----------------------------------------------------------

                    // R(:,j) = C(:,j), regardless of Z(:,j)
                    #if defined ( GB_PHASE_1_OF_2 )
                    rjnz = cjnz ;
                    #else
                    ASSERT (rjnz == cjnz) ;
                    memcpy (Ri +(pR), Ci +(pC), cjnz * sizeof (int64_t)) ;
                    memcpy (Rx +(pR)*rsize, Cx +(pC)*rsize, cjnz*rsize) ;
                    #endif

                }
                else
                {

                    //----------------------------------------------------------
                    // M(:,j) is empty and complemented
                    //----------------------------------------------------------

                    // R(:,j) = Z(:,j), regardless of C(:,j)
                    #if defined ( GB_PHASE_1_OF_2 )
                    rjnz = zjnz ;
                    #else
                    ASSERT (rjnz == zjnz) ;
                    memcpy (Ri +(pR), Zi +(pZ), zjnz * sizeof (int64_t)) ;
                    memcpy (Rx +(pR)*rsize, Zx +(pZ)*rsize, zjnz*rsize) ;
                    #endif
                }

            }
            else if (cdense && zdense)
            {

                //--------------------------------------------------------------
                // C(:,j) and Z(:,j) dense: thus R(:,j) dense
                //--------------------------------------------------------------

                // Both inputs are dense in this slice, so R(:,j) has the same
                // pattern; pick each entry from Z (mask true) or C (mask false).
                ASSERT (cjnz == zjnz) ;
                ASSERT (iC_first == iZ_first) ;
                ASSERT (iC_last == iZ_last ) ;
                #if defined ( GB_PHASE_1_OF_2 )
                rjnz = cjnz ;
                #else
                ASSERT (rjnz == cjnz) ;
                for (int64_t p = 0 ; p < cjnz ; p++)
                {
                    int64_t i = p + iC_first ;
                    Ri [pR + p] = i ;
                    int64_t iM = (pM < pM_end) ? Mi [pM] : INT64_MAX ;
                    bool mij = false ;
                    if (i == iM)
                    {
                        mij = GB_mcast (Mx, pM, msize) ;
                        pM++ ;
                    }
                    if (Mask_comp) mij = !mij ;
                    if (mij)
                    {
                        memcpy (Rx +(pR+p)*rsize, Zx +(pZ+p)*rsize, rsize) ;
                    }
                    else
                    {
                        memcpy (Rx +(pR+p)*rsize, Cx +(pC+p)*rsize, rsize) ;
                    }
                }
                #endif

            }
            else
            {

                //--------------------------------------------------------------
                // 2-way merge of C(:,j) and Z(:,j); binary search of M(:,j)
                //--------------------------------------------------------------

                while (pC < pC_end && pZ < pZ_end)
                {

                    //----------------------------------------------------------
                    // get the next i for R(:,j)
                    //----------------------------------------------------------

                    int64_t iC = Ci [pC] ;
                    int64_t iZ = Zi [pZ] ;
                    int64_t i = GB_IMIN (iC, iZ) ;

                    //----------------------------------------------------------
                    // get M(i,j)
                    //----------------------------------------------------------

                    bool mij = false ;
                    if (mdense)
                    {

                        //------------------------------------------------------
                        // M(:,j) is dense
                        //------------------------------------------------------

                        // mask is dense, lookup M(i,j)
                        // iM_first == Mi [pM_first]
                        // iM_first + delta == Mi [pM_first + delta]
                        // let i = iM_first + delta
                        // let pM = pM_first + delta
                        // then delta = i - iM_first
                        pM = pM_first + (i - iM_first) ;
                        ASSERT (i == Mi [pM]) ;
                        mij = GB_mcast (Mx, pM, msize) ;
                        // increment pM for the wrapup phase below
                        pM++ ;

                    }
                    else
                    {

                        //------------------------------------------------------
                        // M(:,j) is sparse
                        //------------------------------------------------------

                        // Use GB_SPLIT_BINARY_SEARCH so that pM can be used in
                        // the for loop with index pM in the wrapup phase.
                        int64_t pright = pM_end - 1 ;
                        bool found ;
                        GB_SPLIT_BINARY_SEARCH (i, Mi, pM, pright, found) ;
                        if (found)
                        {
                            ASSERT (i == Mi [pM]) ;
                            mij = GB_mcast (Mx, pM, msize) ;
                            // increment pM for the wrapup phase below
                            pM++ ;
                        }
                    }
                    if (Mask_comp) mij = !mij ;

                    //----------------------------------------------------------
                    // R(i,j) = C(i,j) or Z(i,j)
                    //----------------------------------------------------------

                    if (iC < iZ)
                    {
                        // C(i,j) is present but Z(i,j) is not
                        if (!mij) GB_COPY_C ;
                        pC++ ;
                    }
                    else if (iC > iZ)
                    {
                        // Z(i,j) is present but C(i,j) is not
                        if (mij) GB_COPY_Z ;
                        pZ++ ;
                    }
                    else
                    {
                        // both C(i,j) and Z(i,j) are present
                        if (mij)
                        {
                            GB_COPY_Z ;
                        }
                        else
                        {
                            GB_COPY_C ;
                        }
                        pC++ ;
                        pZ++ ;
                    }
                }

                //--------------------------------------------------------------
                // wrapup: C or Z are exhausted, or initially empty
                //--------------------------------------------------------------

                // Only one of C(:,j) / Z(:,j) can have entries left here, so
                // each remaining entry needs only a mask lookup, not a merge.
                cjnz = pC_end - pC ;    // nnz (C(:,j)) remaining
                zjnz = pZ_end - pZ ;    // nnz (Z(:,j)) remaining
                mjnz = pM_end - pM ;    // nnz (M(:,j)) remaining

                if (cjnz == 0)
                {

                    //----------------------------------------------------------
                    // C(:,j) is empty
                    //----------------------------------------------------------

                    if (!Mask_comp)
                    {

                        //------------------------------------------------------
                        // mask is not complemented
                        //------------------------------------------------------

                        // R(:,j) gets the remaining Z entries where M is true.
                        if (mdense)
                        {

                            //--------------------------------------------------
                            // M(:,j) is dense
                            //--------------------------------------------------

                            for ( ; pZ < pZ_end ; pZ++)
                            {
                                int64_t i = Zi [pZ] ;
                                // mask is dense, lookup M(i,j)
                                pM = pM_first + (i - iM_first) ;
                                ASSERT (i == Mi [pM]) ;
                                bool mij = GB_mcast (Mx, pM, msize) ;
                                if (mij) GB_COPY_Z ;
                            }

                        }
                        else if (zjnz > 32 * mjnz)
                        {

                            //--------------------------------------------------
                            // Z(:,j) is much denser than M(:,j)
                            //--------------------------------------------------

                            // This loop requires pM to start at the first
                            // entry in M(:,j) that has not yet been handled.
                            for ( ; pM < pM_end ; pM++)
                            {
                                if (GB_mcast (Mx, pM, msize))
                                {
                                    int64_t i = Mi [pM] ;
                                    int64_t pright = pZ_end - 1 ;
                                    bool found ;
                                    GB_BINARY_SEARCH (i, Zi, pZ, pright, found);
                                    if (found) GB_COPY_Z ;
                                }
                            }

                        }
                        else if (mjnz > 32 * zjnz)
                        {

                            //--------------------------------------------------
                            // M(:,j) is much denser than Z(:,j)
                            //--------------------------------------------------

                            for ( ; pZ < pZ_end ; pZ++)
                            {
                                int64_t i = Zi [pZ] ;
                                bool mij = false ;
                                int64_t pright = pM_end - 1 ;
                                bool found ;
                                GB_BINARY_SEARCH (i, Mi, pM, pright,found) ;
                                if (found) mij = GB_mcast (Mx, pM, msize) ;
                                if (mij) GB_COPY_Z ;
                            }

                        }
                        else
                        {

                            //--------------------------------------------------
                            // M(:,j) and Z(:,j) have about the same # entries
                            //--------------------------------------------------

                            while (pM < pM_end && pZ < pZ_end)
                            {
                                int64_t iM = Mi [pM] ;
                                int64_t i = Zi [pZ] ;
                                if (iM < i)
                                {
                                    // M(i,j) exists but not Z(i,j)
                                    pM++ ;
                                }
                                else if (i < iM)
                                {
                                    // Z(i,j) exists but not M(i,j)
                                    pZ++ ;
                                }
                                else
                                {
                                    // both M(i,j) and Z(i,j) exist
                                    if (GB_mcast (Mx, pM, msize)) GB_COPY_Z ;
                                    pM++ ;
                                    pZ++ ;
                                }
                            }
                        }

                    }
                    else
                    {

                        //------------------------------------------------------
                        // complemented mask, and C(:,j) empty
                        //------------------------------------------------------

                        // R(:,j) gets the remaining Z entries where M is false.
                        if (mdense)
                        {

                            //--------------------------------------------------
                            // M(:,j) is dense
                            //--------------------------------------------------

                            for ( ; pZ < pZ_end ; pZ++)
                            {
                                int64_t i = Zi [pZ] ;
                                // mask is dense, lookup M(i,j)
                                pM = pM_first + (i - iM_first) ;
                                ASSERT (i == Mi [pM]) ;
                                bool mij = GB_mcast (Mx, pM, msize) ;
                                if (!mij) GB_COPY_Z ;   // mask is complemented
                            }

                        }
                        else
                        {

                            //--------------------------------------------------
                            // M(:,j) is sparse
                            //--------------------------------------------------

                            for ( ; pZ < pZ_end ; pZ++)
                            {
                                int64_t i = Zi [pZ] ;
                                bool mij = false ;
                                int64_t pright = pM_end - 1 ;
                                bool found ;
                                GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
                                if (found) mij = GB_mcast (Mx, pM, msize) ;
                                if (!mij) GB_COPY_Z ;   // mask is complemented
                            }
                        }
                    }

                }
                else if (zjnz == 0)
                {

                    //----------------------------------------------------------
                    // Z(:,j) is empty
                    //----------------------------------------------------------

                    if (Mask_comp)
                    {

                        //------------------------------------------------------
                        // mask is complemented
                        //------------------------------------------------------

                        // R(:,j) gets the remaining C entries where M is true.
                        if (mdense)
                        {

                            //--------------------------------------------------
                            // M(:,j) is dense
                            //--------------------------------------------------

                            for ( ; pC < pC_end ; pC++)
                            {
                                int64_t i = Ci [pC] ;
                                // mask is dense, lookup M(i,j)
                                pM = pM_first + (i - iM_first) ;
                                ASSERT (i == Mi [pM]) ;
                                bool mij = GB_mcast (Mx, pM, msize) ;
                                if (mij) GB_COPY_C ;
                            }

                        }
                        else if (cjnz > 32 * mjnz)
                        {

                            //--------------------------------------------------
                            // C(:,j) is much denser than M(:,j)
                            //--------------------------------------------------

                            for ( ; pM < pM_end ; pM++)
                            {
                                if (GB_mcast (Mx, pM, msize))
                                {
                                    int64_t i = Mi [pM] ;
                                    int64_t pright = pC_end - 1 ;
                                    bool found ;
                                    GB_BINARY_SEARCH (i, Ci, pC, pright, found);
                                    if (found) GB_COPY_C ;
                                }
                            }

                        }
                        else if (mjnz > 32 * cjnz)
                        {

                            //--------------------------------------------------
                            // M(:,j) is much denser than C(:,j)
                            //--------------------------------------------------

                            for ( ; pC < pC_end ; pC++)
                            {
                                int64_t i = Ci [pC] ;
                                bool mij = false ;
                                int64_t pright = pM_end - 1 ;
                                bool found ;
                                GB_BINARY_SEARCH (i, Mi, pM, pright, found);
                                if (found) mij = GB_mcast (Mx, pM, msize) ;
                                if (mij) GB_COPY_C ;
                            }

                        }
                        else
                        {

                            //--------------------------------------------------
                            // M(:,j) and C(:,j) have about the same # entries
                            //--------------------------------------------------

                            while (pM < pM_end && pC < pC_end)
                            {
                                int64_t iM = Mi [pM] ;
                                int64_t i = Ci [pC] ;
                                if (iM < i)
                                {
                                    // M(i,j) exists but not C(i,j)
                                    pM++ ;
                                }
                                else if (i < iM)
                                {
                                    // C(i,j) exists but not M(i,j)
                                    pC++ ;
                                }
                                else
                                {
                                    // both M(i,j) and C(i,j) exist
                                    if (GB_mcast (Mx, pM, msize)) GB_COPY_C ;
                                    pM++ ;
                                    pC++ ;
                                }
                            }
                        }

                    }
                    else
                    {

                        //------------------------------------------------------
                        // non-complemented mask, and Z(:,j) empty
                        //------------------------------------------------------

                        // R(:,j) gets the remaining C entries where M is false.
                        if (mdense)
                        {

                            //--------------------------------------------------
                            // M(:,j) is dense
                            //--------------------------------------------------

                            for ( ; pC < pC_end ; pC++)
                            {
                                int64_t i = Ci [pC] ;
                                // mask is dense, lookup M(i,j)
                                pM = pM_first + (i - iM_first) ;
                                ASSERT (i == Mi [pM]) ;
                                bool mij = GB_mcast (Mx, pM, msize) ;
                                if (!mij) GB_COPY_C ;
                            }

                        }
                        else
                        {

                            //--------------------------------------------------
                            // M(:,j) is sparse
                            //--------------------------------------------------

                            for ( ; pC < pC_end ; pC++)
                            {
                                int64_t i = Ci [pC] ;
                                bool mij = false ;  // M(i,j) false if not present
                                int64_t pright = pM_end - 1 ;
                                bool found ;
                                GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
                                if (found) mij = GB_mcast (Mx, pM, msize) ;
                                if (!mij) GB_COPY_C ;
                            }
                        }
                    }
                }

                #if defined ( GB_PHASE_2_OF_2 )
                ASSERT (pR == pR_end) ;
                #endif
            }

            //------------------------------------------------------------------
            // final count of nnz (R(:,j))
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (fine_task)
            {
                // fine tasks report their slice count; the caller cumsums them
                TaskList [taskid].pC = rjnz ;
            }
            else
            {
                Rp [k] = rjnz ;
            }
            #endif
        }
    }
}
|
omp_mm.c | /******************************************************************************
* FILE: omp_mm.c
* DESCRIPTION:
* OpenMp Example - Matrix Multiply - C Version
* Demonstrates a matrix multiply using OpenMP. Threads share row iterations
* according to a predefined chunk size.
* AUTHOR: Blaise Barney
* LAST REVISED: 06/28/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define NRA 62 /* number of rows in matrix A */
#define NCA 15 /* number of columns in matrix A */
#define NCB 7 /* number of columns in matrix B */
int main (int argc, char *argv[])
{
/* Matrix-multiply demo: computes c = a * b with OpenMP worksharing loops.
 * Row iterations are distributed statically in chunks of `chunk`. */
int tid, nthreads, row, col, idx, chunk;
double a[NRA][NCA],           /* left operand  */
       b[NCA][NCB],           /* right operand */
       c[NRA][NCB];           /* product       */

chunk = 10;                   /* iterations handed out per scheduling chunk */

/*** Spawn a parallel region explicitly scoping all variables ***/
#pragma omp parallel shared(a,b,c,nthreads,chunk) private(tid,row,col,idx)
  {
  tid = omp_get_thread_num();
  if (tid == 0)
    {
    nthreads = omp_get_num_threads();
    printf("Starting matrix multiple example with %d threads\n",nthreads);
    printf("Initializing matrices...\n");
    }

  /*** Initialize matrices; each init loop is workshared across threads ***/
#pragma omp for schedule (static, chunk)
  for (row=0; row<NRA; row++)
    for (col=0; col<NCA; col++)
      a[row][col] = row+col;
#pragma omp for schedule (static, chunk)
  for (row=0; row<NCA; row++)
    for (col=0; col<NCB; col++)
      b[row][col] = row*col;
#pragma omp for schedule (static, chunk)
  for (row=0; row<NRA; row++)
    for (col=0; col<NCB; col++)
      c[row][col] = 0;

  /*** Do matrix multiply sharing iterations on outer loop ***/
  /*** Display who does which iterations for demonstration purposes ***/
  printf("Thread %d starting matrix multiply...\n",tid);
#pragma omp for schedule (static, chunk)
  for (row=0; row<NRA; row++)
    {
    printf("Thread=%d did row=%d\n",tid,row);
    for (col=0; col<NCB; col++)
      for (idx=0; idx<NCA; idx++)
        c[row][col] += a[row][idx] * b[idx][col];
    }
  }   /*** End of parallel region ***/

/*** Print results ***/
printf("******************************************************\n");
printf("Result Matrix:\n");
for (row=0; row<NRA; row++)
  {
  for (col=0; col<NCB; col++)
    printf("%6.2f ", c[row][col]);
  printf("\n");
  }
printf("******************************************************\n");
printf ("Done.\n");
}
|
GB_unaryop__minv_uint8_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_uint32
// op(A') function: GB_tran__minv_uint8_uint32
// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
// A (input) entry type
#define GB_ATYPE \
uint32_t
// C (output) entry type
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator: modular multiplicative inverse over 8-bit unsigned ints
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting: truncate the 32-bit input down to 8 bits before applying the op
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint8_uint32
(
uint8_t *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz, // number of entries in Ax (and Cx)
int nthreads // # of OpenMP threads to use
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* flags; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// elementwise: Cx [p] = minv ((uint8_t) Ax [p]), in parallel
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint8_uint32
(
GrB_Matrix C, // output: C = minv ((uint8_t) A')
const GrB_Matrix A, // input matrix, not modified
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts from phase 1
GBI_single_iterator Iter, // iterator over the vectors of A
const int64_t *GB_RESTRICT A_slice, // partition of A into naslice slices
int naslice // # of slices (parallel tasks)
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the transpose body is generated from the shared template, specialized by
// the GB_* macros defined above
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Nonlocal_TV_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC and Diamond Light Source Ltd.
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
* Copyright 2018 Diamond Light Source Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Nonlocal_TV_core.h"
/* C-OMP implementation of non-local regulariser
* Weights and associated indices must be given as an input.
* Gauss-Seidel fixed point iteration requires ~ 3 iterations, so the main effort
* goes in pre-calculation of weights and selection of patches
*
*
* Input Parameters:
* 1. 2D/3D grayscale image/volume
* 2. AR_i - indeces of i neighbours
* 3. AR_j - indeces of j neighbours
* 4. AR_k - indeces of k neighbours (0 - for 2D case)
* 5. Weights_ij(k) - associated weights
* 6. regularisation parameter
* 7. iterations number
* Output:
* 1. denoised image/volume
* Elmoataz, Abderrahim, Olivier Lezoray, and Sébastien Bougleux. "Nonlocal discrete regularization on weighted graphs: a framework for image and manifold processing." IEEE Trans. Image Processing 17, no. 7 (2008): 1047-1060.
*/
/*****************************************************************************/
float Nonlocal_TV_CPU_main(float *A_orig, float *Output, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, int dimX, int dimY, int dimZ, int NumNeighb, float lambdaReg, int IterNumb)
{
/* Driver for the non-local TV denoiser: runs IterNumb Gauss-Seidel sweeps
 * of the per-pixel/voxel update over the whole image (2D when dimZ == 0,
 * 3D otherwise).  Output is initialized from A_orig and updated in place.
 * H_i/H_j(/H_k) hold precomputed neighbour indices, Weights the matching
 * similarity weights (NumNeighb records per pixel).  Returns Output[0]
 * (*Output), matching the convention of the per-pixel kernels. */
long i, j, k;
int iter;
/* the kernels expect 1/lambda; invert the regularisation parameter once */
lambdaReg = 1.0f/lambdaReg;
/*****2D INPUT *****/
if (dimZ == 0) {
copyIm(A_orig, Output, (long)(dimX), (long)(dimY), 1l);
/* for each pixel store indeces of the most similar neighbours (patches) */
for(iter=0; iter<IterNumb; iter++) {
/* each sweep updates Output in place; pixels are processed in parallel */
#pragma omp parallel for shared (A_orig, Output, Weights, H_i, H_j, iter) private(i,j)
for(i=0; i<(long)(dimX); i++) {
for(j=0; j<(long)(dimY); j++) {
/*NLM_H1_2D(Output, A_orig, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), NumNeighb, lambdaReg);*/ /* NLM - H1 penalty */
NLM_TV_2D(Output, A_orig, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), NumNeighb, lambdaReg); /* NLM - TV penalty */
}}
}
}
else {
/*****3D INPUT *****/
copyIm(A_orig, Output, (long)(dimX), (long)(dimY), (long)(dimZ));
/* for each pixel store indeces of the most similar neighbours (patches) */
for(iter=0; iter<IterNumb; iter++) {
#pragma omp parallel for shared (A_orig, Output, Weights, H_i, H_j, H_k, iter) private(i,j,k)
for(i=0; i<(long)(dimX); i++) {
for(j=0; j<(long)(dimY); j++) {
for(k=0; k<(long)(dimZ); k++) {
/* NLM_H1_3D(Output, A_orig, H_i, H_j, H_k, Weights, i, j, k, dimX, dimY, dimZ, NumNeighb, lambdaReg); */ /* NLM - H1 penalty */
NLM_TV_3D(Output, A_orig, H_i, H_j, H_k, Weights, i, j, k, (long)(dimX), (long)(dimY), (long)(dimZ), NumNeighb, lambdaReg); /* NLM - TV penalty */
}}}
}
}
return *Output;
}
/***********<<<<Main Function for NLM - H1 penalty>>>>**********/
float NLM_H1_2D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, int NumNeighb, float lambdaReg)
{
    /* Non-local H1 penalty: one fixed-point update of pixel (i,j) of A,
     * blending the data term A_orig with a weighted average of the
     * NumNeighb precomputed neighbours.  Returns *A (A[0]). */
    const long centre = j*dimX + i;     /* linear index of the centre pixel */
    float acc = 0.0f;                   /* weighted sum of neighbour values */
    float wsum = 0.0f;                  /* sum of neighbour weights */
    for (long n = 0; n < NumNeighb; n++) {
        /* the n-th neighbour record of pixel (i,j) */
        const long idx = (dimX*dimY*n) + centre;
        const long ni = H_i[idx];
        const long nj = H_j[idx];
        acc  += Weights[idx] * A[nj*dimX + ni];
        wsum += Weights[idx];
    }
    /* Gauss-Seidel step: lambdaReg already holds 1/lambda (see driver) */
    A[centre] = (lambdaReg*A_orig[centre] + acc)/(lambdaReg + wsum);
    return *A;
}
/*3D version*/
float NLM_H1_3D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, long i, long j, long k, long dimX, long dimY, long dimZ, int NumNeighb, float lambdaReg)
{
    /* 3D non-local H1 penalty: one fixed-point update of voxel (i,j,k) of A,
     * blending the data term A_orig with a weighted neighbour average.
     * Returns *A (A[0]). */
    const long centre = (dimX*dimY*k) + j*dimX + i;  /* centre voxel index */
    float acc = 0.0f;                                /* weighted values */
    float wsum = 0.0f;                               /* sum of weights  */
    for (long n = 0; n < NumNeighb; n++) {
        /* the n-th neighbour record of voxel (i,j,k) */
        const long idx = dimX*dimY*dimZ*n + centre;
        const long ni = H_i[idx];
        const long nj = H_j[idx];
        const long nk = H_k[idx];
        acc  += Weights[idx] * A[(dimX*dimY*nk) + nj*dimX + ni];
        wsum += Weights[idx];
    }
    /* Gauss-Seidel step: lambdaReg already holds 1/lambda (see driver) */
    A[centre] = (lambdaReg*A_orig[centre] + acc)/(lambdaReg + wsum);
    return *A;
}
/***********<<<<Main Function for NLM - TV penalty>>>>**********/
float NLM_TV_2D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, int NumNeighb, float lambdaReg)
{
    /* Non-local TV penalty: one fixed-point update of pixel (i,j) of A.
     * First pass measures the non-local gradient magnitude at the pixel;
     * second pass averages the neighbours scaled by the TV diffusivity.
     * Returns *A (A[0]). */
    const long centre = j*dimX + i;     /* linear index of the centre pixel */
    float grad2 = 0.0f;

    /* Pass 1: squared non-local gradient magnitude. */
    for (long n = 0; n < NumNeighb; n++) {
        const long idx = (dimX*dimY*n) + centre; /*c*/
        const long ni = H_i[idx];
        const long nj = H_j[idx];
        grad2 += powf((A[nj*dimX + ni] - A[centre]),2)*Weights[idx];
    }
    const float NLgrad_magn = sqrtf(grad2);  /*Non Local Gradients Magnitude */
    /* TV diffusivity 2/|grad|; EPS (project header) guards division by zero */
    const float coeff = 2.0f*(1.0f/(NLgrad_magn + EPS));

    /* Pass 2: diffusivity-scaled weighted average of the neighbours. */
    float acc = 0.0f;
    float wsum = 0.0f;
    for (long n = 0; n < NumNeighb; n++) {
        const long idx = (dimX*dimY*n) + centre; /*c*/
        const long ni = H_i[idx];
        const long nj = H_j[idx];
        acc  += A[nj*dimX + ni]*coeff*Weights[idx];
        wsum += Weights[idx]*coeff;
    }
    /* Gauss-Seidel step: lambdaReg already holds 1/lambda (see driver) */
    A[centre] = (lambdaReg*A_orig[centre] + acc)/(lambdaReg + wsum);
    return *A;
}
/*3D version*/
float NLM_TV_3D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, long i, long j, long k, long dimX, long dimY, long dimZ, int NumNeighb, float lambdaReg)
{
    /* 3D non-local TV penalty: one fixed-point update of voxel (i,j,k) of A,
     * blending the data term A_orig with a TV-weighted neighbour average.
     * lambdaReg is expected to hold 1/lambda (inverted by the driver).
     * Returns *A (A[0]), matching the other per-pixel kernels. */
    long x, i1, j1, k1, index, index_m;
    float value = 0.0f, normweight = 0.0f, NLgrad_magn = 0.0f, NLCoeff;
    index_m = (dimX*dimY*k) + j*dimX+i;   /* linear index of the centre voxel */
    for(x=0; x < NumNeighb; x++) {
        index = dimX*dimY*dimZ*x + index_m;
        i1 = H_i[index];
        j1 = H_j[index];
        k1 = H_k[index];
        /* BUG FIX: the gradient is neighbour minus the CENTRE voxel
         * A[index_m] (slice k), as in the 2D variant; the previous code
         * read A[(dimX*dimY*k1) + j*dimX+i], i.e. pixel (i,j) in the
         * neighbour's slice k1. */
        NLgrad_magn += powf((A[(dimX*dimY*k1) + j1*dimX+i1] - A[index_m]),2)*Weights[index];
    }
    NLgrad_magn = sqrtf(NLgrad_magn); /*Non Local Gradients Magnitude */
    /* TV diffusivity 2/|grad|; EPS (project header) guards division by zero */
    NLCoeff = 2.0f*(1.0f/(NLgrad_magn + EPS));
    for(x=0; x < NumNeighb; x++) {
        index = dimX*dimY*dimZ*x + index_m;
        i1 = H_i[index];
        j1 = H_j[index];
        k1 = H_k[index];
        value += A[(dimX*dimY*k1) + j1*dimX+i1]*NLCoeff*Weights[index];
        normweight += Weights[index]*NLCoeff;
    }
    /* Gauss-Seidel step blending the data term and the neighbour average */
    A[index_m] = (lambdaReg*A_orig[index_m] + value)/(lambdaReg + normweight);
    return *A;
}
|
callback.h | #ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#ifndef _DEFAULT_SOURCE
#define _DEFAULT_SOURCE
#endif
#include <stdio.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <omp.h>
#include <omp-tools.h>
#include "ompt-signal.h"
// Used to detect architecture
#include "../../src/kmp_platform.h"
#ifndef _TOOL_PREFIX
#define _TOOL_PREFIX ""
// If no _TOOL_PREFIX is set, we assume that we run as part of an OMPT test
#define _OMPT_TESTS
#endif
// Printable names for ompt_thread_t, indexed by the enum value.
static const char *ompt_thread_t_values[] = {
"ompt_thread_UNDEFINED", "ompt_thread_initial", "ompt_thread_worker",
"ompt_thread_other"};
// Printable names for ompt_task_status_t, indexed by the enum value.
static const char *ompt_task_status_t_values[] = {
"ompt_task_UNDEFINED",
"ompt_task_complete", // 1
"ompt_task_yield", // 2
"ompt_task_cancel", // 3
"ompt_task_detach", // 4
"ompt_task_early_fulfill", // 5
"ompt_task_late_fulfill", // 6
"ompt_task_switch" // 7
};
// Printable names for the ompt_cancel_flag_t bit flags, in bit order.
static const char* ompt_cancel_flag_t_values[] = {
"ompt_cancel_parallel",
"ompt_cancel_sections",
"ompt_cancel_loop",
"ompt_cancel_taskgroup",
"ompt_cancel_activated",
"ompt_cancel_detected",
"ompt_cancel_discarded_task"
};
// Printable names for ompt_dependence_type_t, indexed by the enum value.
static const char *ompt_dependence_type_t_values[] = {
"ompt_dependence_type_UNDEFINED",
"ompt_dependence_type_in", // 1
"ompt_dependence_type_out", // 2
"ompt_dependence_type_inout", // 3
"ompt_dependence_type_mutexinoutset", // 4
"ompt_dependence_type_source", // 5
"ompt_dependence_type_sink", // 6
"ompt_dependence_type_inoutset" // 7
};
static void format_task_type(int type, char *buffer) {
  // Render the ompt task-type bitset into buffer as a human-readable string.
  // The four kind flags carry no separator (they are mutually exclusive in
  // practice); each modifier flag is appended with a leading '|'.
  static const struct {
    int flag;
    const char *name;
  } parts[] = {{ompt_task_initial, "ompt_task_initial"},
               {ompt_task_implicit, "ompt_task_implicit"},
               {ompt_task_explicit, "ompt_task_explicit"},
               {ompt_task_target, "ompt_task_target"},
               {ompt_task_undeferred, "|ompt_task_undeferred"},
               {ompt_task_untied, "|ompt_task_untied"},
               {ompt_task_final, "|ompt_task_final"},
               {ompt_task_mergeable, "|ompt_task_mergeable"},
               {ompt_task_merged, "|ompt_task_merged"}};
  char *out = buffer;
  for (int n = 0; n < (int)(sizeof(parts) / sizeof(parts[0])); n++) {
    if (type & parts[n].flag)
      out += sprintf(out, "%s", parts[n].name);
  }
}
// OMPT runtime entry points; each pointer is looked up via the runtime's
// lookup function when the tool is initialized, then used by the callbacks.
static ompt_set_callback_t ompt_set_callback;
static ompt_get_callback_t ompt_get_callback;
static ompt_get_state_t ompt_get_state;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_task_memory_t ompt_get_task_memory;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_finalize_tool_t ompt_finalize_tool;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;
// Query the task at the given ancestry level via ompt_get_task_info and print
// its parallel/task ids, frame pointers, type flags and thread number.
// Prints nothing when the runtime returns a NULL frame pointer.
static void print_ids(int level)
{
int task_type, thread_num;
ompt_frame_t *frame;
ompt_data_t *task_parallel_data;
ompt_data_t *task_data;
// exists_task is 0 when no task exists at this level; the id fields then
// fall back to 0 in the printf below
int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame,
&task_parallel_data, &thread_num);
char buffer[2048];
format_task_type(task_type, buffer);
if (frame)
printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, "
"task_type=%s=%d, thread_num=%d\n",
ompt_get_thread_data()->value, level,
exists_task ? task_parallel_data->value : 0,
exists_task ? task_data->value : 0, frame->exit_frame.ptr,
frame->enter_frame.ptr, buffer, task_type, thread_num);
}
// get_frame_address: GCC/Clang builtin returning the frame pointer of the
// caller `level` frames up the stack.
#define get_frame_address(level) __builtin_frame_address(level)
#define print_frame(level) \
printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \
ompt_get_thread_data()->value, level, get_frame_address(level))
// clang (version 5.0 and above) adds an intermediate function call with debug flag (-g)
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif
#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif
// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
{} \
__asm__("nop"); \
ompt_label_##id:
// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)
// This macro prints the exact address that a previously called runtime function
// returns to.
#define print_current_address(id) \
define_ompt_label(id) \
print_possible_return_addresses(get_ompt_label_address(id))
// Architecture-specific definitions of print_possible_return_addresses():
// each variant subtracts the target-dependent number of bytes between the
// label defined by define_ompt_label() and the actual return address of the
// preceding runtime call.
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts a second NOP instruction (another 4 bytes). For non-void runtime
// functions Clang inserts a STW instruction (but only if compiling under
// -fno-PIC which will be the default with Clang 8.0, another 4 bytes).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 8, ((char *)addr) - 12)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 4, ((char *)addr) - 8)
#elif KMP_ARCH_RISCV64
#if __riscv_compressed
// On RV64GC the C.NOP instruction is 2 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the C.NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 6, ((char *)addr) - 10)
#else
// On RV64G the NOP instruction is 4 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 8, ((char *)addr) - 12)
#endif
#else
#error Unsupported target architecture, cannot determine address offset!
#endif
// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
  define_ompt_label(id) \
  print_fuzzy_address_blocks(get_ompt_label_address(id))
// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
// Prints four adjacent truncated blocks (-1 .. +2) so a test pattern can
// match whichever block the true return address falls into.
#define print_fuzzy_address_blocks(addr) \
  printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 \
         " or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \
         ompt_get_thread_data()->value, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr)
// Registers the local handler on_<name> for OMPT event <name>, casting it to
// the given callback type; reports (but does not abort on) registration
// failure.  register_ompt_callback() is the common case where the callback
// type is simply <name>_t.
#define register_ompt_callback_t(name, type) \
  do { \
    type f_##name = &on_##name; \
    if (ompt_set_callback(name, (ompt_callback_t)f_##name) == ompt_set_never) \
      printf("0: Could not register callback '" #name "'\n"); \
  } while (0)
#define register_ompt_callback(name) register_ompt_callback_t(name, name##_t)
#ifndef USE_PRIVATE_TOOL
// Mutex-acquire callback: logs that the current thread is about to wait
// for the given mutex kind (lock, nest_lock, critical, atomic, ordered).
static void
on_ompt_callback_mutex_acquire(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  // Map the mutex kind onto the name expected by the test patterns;
  // unknown kinds are silently ignored (as in the original default case).
  const char *mutex_name = NULL;
  switch (kind) {
  case ompt_mutex_lock:
    mutex_name = "lock";
    break;
  case ompt_mutex_nest_lock:
    mutex_name = "nest_lock";
    break;
  case ompt_mutex_critical:
    mutex_name = "critical";
    break;
  case ompt_mutex_atomic:
    mutex_name = "atomic";
    break;
  case ompt_mutex_ordered:
    mutex_name = "ordered";
    break;
  default:
    break;
  }
  if (mutex_name)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_%s: wait_id=%" PRIu64
           ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, mutex_name, wait_id, hint, impl,
           codeptr_ra);
}
// Mutex-acquired callback: logs that the current thread now owns the mutex.
// The event names differ per kind (e.g. nest locks report the *first*
// acquisition here; re-acquisitions go through the nest_lock callback).
static void
on_ompt_callback_mutex_acquired(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *event_name = NULL;
  switch (kind) {
  case ompt_mutex_lock:
    event_name = "acquired_lock";
    break;
  case ompt_mutex_nest_lock:
    event_name = "acquired_nest_lock_first";
    break;
  case ompt_mutex_critical:
    event_name = "acquired_critical";
    break;
  case ompt_mutex_atomic:
    event_name = "acquired_atomic";
    break;
  case ompt_mutex_ordered:
    event_name = "acquired_ordered";
    break;
  default:
    break;
  }
  if (event_name)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
           ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, event_name, wait_id, codeptr_ra);
}
// Mutex-released callback: logs that the current thread released the mutex.
// Nest locks report the *last* (ownership-dropping) release here; inner
// releases go through the nest_lock callback.
static void
on_ompt_callback_mutex_released(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *event_name = NULL;
  switch (kind) {
  case ompt_mutex_lock:
    event_name = "release_lock";
    break;
  case ompt_mutex_nest_lock:
    event_name = "release_nest_lock_last";
    break;
  case ompt_mutex_critical:
    event_name = "release_critical";
    break;
  case ompt_mutex_atomic:
    event_name = "release_atomic";
    break;
  case ompt_mutex_ordered:
    event_name = "release_ordered";
    break;
  default:
    break;
  }
  if (event_name)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
           ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, event_name, wait_id, codeptr_ra);
}
// Nest-lock callback: covers the inner acquisitions/releases of a nested
// lock (scope_begin = re-acquired, scope_end = partially released).  The
// outermost acquire/release pair is reported by the mutex callbacks.
static void
on_ompt_callback_nest_lock(
  ompt_scope_endpoint_t endpoint,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (endpoint == ompt_scope_beginend) {
    printf("ompt_scope_beginend should never be passed to %s\n", __func__);
    exit(-1);
  }
  const char *event_name = (endpoint == ompt_scope_begin)
                               ? "acquired_nest_lock_next"
                               : "release_nest_lock_prev";
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
         ", codeptr_ra=%p \n",
         ompt_get_thread_data()->value, event_name, wait_id, codeptr_ra);
}
// Sync-region callback: logs begin/end of barriers, taskwait and taskgroup
// regions.  Reduction sync regions must be delivered to
// on_ompt_callback_reduction instead, so seeing one here is a hard error.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        // All barrier flavors are reported under the same event name.
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_barrier_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          // Also dump the current task/parallel ids and frames (helper
          // defined earlier in this file) so tests can cross-check them.
          print_ids(0);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskwait_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskgroup_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      // At region end parallel_data may already be NULL (e.g. the implicit
      // barrier at the end of a parallel region), hence the null checks.
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_barrier_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskwait_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskgroup_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// Sync-region-wait callback: like on_ompt_callback_sync_region, but marks
// the interval the thread actually spends *waiting* inside the region
// (emitted nested within the corresponding begin/end events).
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        // All barrier flavors are reported under the same event name.
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_barrier_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      // parallel_data may be NULL at region end, hence the null checks.
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_barrier_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskwait_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// Reduction callback: logs begin/end of a reduction sync region.  The
// 'kind' argument is unused here (reductions have their own callback, so
// the kind is implied).
static void on_ompt_callback_reduction(ompt_sync_region_t kind,
                                       ompt_scope_endpoint_t endpoint,
                                       ompt_data_t *parallel_data,
                                       ompt_data_t *task_data,
                                       const void *codeptr_ra) {
  if (endpoint == ompt_scope_beginend) {
    printf("ompt_scope_beginend should never be passed to %s\n", __func__);
    exit(-1);
  }
  // parallel_data may be NULL at region end, hence the fallback to 0.
  const char *phase = (endpoint == ompt_scope_begin) ? "begin" : "end";
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_reduction_%s: parallel_id=%" PRIu64
         ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, phase,
         (parallel_data) ? parallel_data->value : 0, task_data->value,
         codeptr_ra);
}
// Flush callback: logs an OpenMP flush.  Note the thread id comes from the
// thread_data argument handed in by the runtime, not ompt_get_thread_data().
static void
on_ompt_callback_flush(
  ompt_data_t *thread_data,
  const void *codeptr_ra)
{
  uint64_t flushing_thread = thread_data->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_flush: codeptr_ra=%p\n",
         flushing_thread, codeptr_ra);
}
// Cancel callback: logs a cancellation event.  The first flag names the
// cancelled construct (parallel/sections/loop/taskgroup), the second how
// the cancellation was observed (activated/detected/discarded_task).
static void
on_ompt_callback_cancel(
  ompt_data_t *task_data,
  int flags,
  const void *codeptr_ra)
{
  // Fix: initialize both strings.  Previously they were left uninitialized,
  // so a 'flags' value matching none of the branches below made the printf
  // read indeterminate pointers (undefined behavior).
  const char* first_flag_value = "";
  const char* second_flag_value = "";
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_loop)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];
  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_cancel: task_data=%" PRIu64
         ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, task_data->value, first_flag_value,
         second_flag_value, flags, codeptr_ra);
}
// Implicit-task callback: assigns fresh unique ids to the task (and, for
// the initial task, to the implicit parallel region) on scope_begin and
// logs begin/end events.  Initial tasks get their own event names with
// actual_parallelism/index fields mapped from team_size/thread_num.
static void
on_ompt_callback_implicit_task(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  unsigned int team_size,
  unsigned int thread_num,
  int flags)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      // The runtime must hand us zero-initialized tool data.
      if(task_data->ptr)
        printf("%s\n", "0: task_data initially not null");
      task_data->value = ompt_get_unique_id();
      //there is no parallel_begin callback for implicit parallel region
      //thus it is initialized in initial task
      if(flags & ompt_task_initial)
      {
        // NOTE(review): 'buffer' is filled by format_task_type but never
        // printed below — appears to be dead; confirm before removing.
        char buffer[2048];
        format_task_type(flags, buffer);
        // Only check initial task not created by teams construct
        if (team_size == 1 && thread_num == 1 && parallel_data->ptr)
          printf("%s\n", "0: parallel_data initially not null");
        parallel_data->value = ompt_get_unique_id();
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_initial_task_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32
               ", index=%" PRIu32 ", flags=%" PRIu32 "\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, team_size, thread_num, flags);
      } else {
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_implicit_task_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", team_size=%" PRIu32
               ", thread_num=%" PRIu32 "\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, team_size, thread_num);
      }
      break;
    case ompt_scope_end:
      // parallel_data may already be NULL at scope end, hence the checks.
      if(flags & ompt_task_initial){
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_initial_task_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32
               ", index=%" PRIu32 "\n",
               ompt_get_thread_data()->value,
               (parallel_data) ? parallel_data->value : 0, task_data->value,
               team_size, thread_num);
      } else {
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_implicit_task_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", team_size=%" PRIu32
               ", thread_num=%" PRIu32 "\n",
               ompt_get_thread_data()->value,
               (parallel_data) ? parallel_data->value : 0, task_data->value,
               team_size, thread_num);
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// Lock-init callback: logs initialization of plain and nested locks.
// Other mutex kinds (critical/atomic/ordered) have no init event.
static void
on_ompt_callback_lock_init(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *lock_kind = NULL;
  if (kind == ompt_mutex_lock)
    lock_kind = "lock";
  else if (kind == ompt_mutex_nest_lock)
    lock_kind = "nest_lock";
  if (lock_kind)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_init_%s: wait_id=%" PRIu64
           ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, lock_kind, wait_id, hint, impl,
           codeptr_ra);
}
// Lock-destroy callback: logs destruction of plain and nested locks.
static void
on_ompt_callback_lock_destroy(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *lock_kind = NULL;
  if (kind == ompt_mutex_lock)
    lock_kind = "lock";
  else if (kind == ompt_mutex_nest_lock)
    lock_kind = "nest_lock";
  if (lock_kind)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_destroy_%s: wait_id=%" PRIu64
           ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, lock_kind, wait_id, codeptr_ra);
}
// Worksharing callback: logs begin/end of loop, sections, single,
// distribute, taskloop and scope constructs.  'count' is the iteration /
// section count reported by the runtime.  ompt_work_workshare is accepted
// but deliberately unreported (see the "//impl" placeholders).
static void
on_ompt_callback_work(
  ompt_work_t wstype,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  uint64_t count,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_loop_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_sections_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        // The thread executing the single block...
        case ompt_work_single_executor:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_in_block_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        // ...versus the threads that skip it.
        case ompt_work_single_other:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_others_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_distribute_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskloop_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_scope:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_scope_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_end:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_loop_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_sections_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_in_block_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_others_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_distribute_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskloop_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_scope:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_scope_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// Masked-construct callback: logs begin/end of a masked (formerly master)
// region executed by the primary thread.
static void on_ompt_callback_masked(ompt_scope_endpoint_t endpoint,
                                    ompt_data_t *parallel_data,
                                    ompt_data_t *task_data,
                                    const void *codeptr_ra) {
  if (endpoint == ompt_scope_beginend) {
    printf("ompt_scope_beginend should never be passed to %s\n", __func__);
    exit(-1);
  }
  const char *phase = (endpoint == ompt_scope_begin) ? "begin" : "end";
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_masked_%s: parallel_id=%" PRIu64 ", task_id=%" PRIu64
         ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, phase, parallel_data->value,
         task_data->value, codeptr_ra);
}
// Parallel-begin callback: assigns a fresh unique id to the new parallel
// (or teams) region and logs the encountering task's id and frames.
static void on_ompt_callback_parallel_begin(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame, ompt_data_t *parallel_data,
    uint32_t requested_team_size, int flag, const void *codeptr_ra) {
  // A fresh region must not carry stale tool data.
  if (parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  // The low nibble of 'flag' encodes the invoker; the ompt_parallel_team
  // bit distinguishes a parallel construct from a teams construct.
  int inv = flag & 0xF;
  int is_team = (flag & ompt_parallel_team) != 0;
  const char *construct = is_team ? "parallel" : "teams";
  const char *size_label = is_team ? "team_size" : "num_teams";
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_%s_begin: parent_task_id=%" PRIu64
         ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
         "parallel_id=%" PRIu64 ", requested_%s=%" PRIu32
         ", codeptr_ra=%p, invoker=%d\n",
         ompt_get_thread_data()->value, construct,
         encountering_task_data->value,
         encountering_task_frame->exit_frame.ptr,
         encountering_task_frame->enter_frame.ptr, parallel_data->value,
         size_label, requested_team_size, codeptr_ra, inv);
}
// Parallel-end callback: logs the end of a parallel (or teams) region.
static void on_ompt_callback_parallel_end(ompt_data_t *parallel_data,
                                          ompt_data_t *encountering_task_data,
                                          int flag, const void *codeptr_ra) {
  int inv = flag & 0xF;
  const char *construct = (flag & ompt_parallel_team) ? "parallel" : "teams";
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s_end: parallel_id=%" PRIu64
         ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n",
         ompt_get_thread_data()->value, construct, parallel_data->value,
         encountering_task_data->value, inv, codeptr_ra);
}
// Task-create callback: assigns a fresh unique id to the new task and logs
// its parent task/frames, type and dependence flag.
static void
on_ompt_callback_task_create(
  ompt_data_t *encountering_task_data,
  const ompt_frame_t *encountering_task_frame,
  ompt_data_t* new_task_data,
  int type,
  int has_dependences,
  const void *codeptr_ra)
{
  if (new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  // Render the task-type bit mask as text (helper defined earlier in file).
  char type_buffer[2048];
  format_task_type(type, type_buffer);
  // The encountering task and its frame may be absent (e.g. initial task).
  uint64_t parent_id =
      encountering_task_data ? encountering_task_data->value : 0;
  void *parent_exit =
      encountering_task_frame ? encountering_task_frame->exit_frame.ptr : NULL;
  void *parent_reenter =
      encountering_task_frame ? encountering_task_frame->enter_frame.ptr : NULL;
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_task_create: parent_task_id=%" PRIu64
         ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
         "new_task_id=%" PRIu64
         ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n",
         ompt_get_thread_data()->value, parent_id, parent_exit, parent_reenter,
         new_task_data->value, codeptr_ra, type_buffer, type,
         has_dependences ? "yes" : "no");
}
// Task-schedule callback: the runtime switches execution from first_task to
// second_task.  If the prior task finished (complete or late fulfill), an
// extra task_end event is emitted for it.
static void
on_ompt_callback_task_schedule(
  ompt_data_t *first_task_data,
  ompt_task_status_t prior_task_status,
  ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_task_schedule: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n",
         ompt_get_thread_data()->value, first_task_data->value,
         // NOTE(review): when second_task_data is NULL the -1 converts to
         // UINT64_MAX under %PRIu64 — looks intentional as a sentinel;
         // confirm against the test expectations.
         (second_task_data ? second_task_data->value : -1),
         ompt_task_status_t_values[prior_task_status], prior_task_status);
  if (prior_task_status == ompt_task_complete ||
      prior_task_status == ompt_task_late_fulfill) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_task_end: task_id=%" PRIu64
           "\n", ompt_get_thread_data()->value, first_task_data->value);
  }
}
// Dependences callback: formats the task's dependence list into a bounded
// buffer as "(var, type), ..." pairs and logs it.  Source/sink (doacross)
// dependences print the integer value, others print the address.
static void
on_ompt_callback_dependences(
  ompt_data_t *task_data,
  const ompt_dependence_t *deps,
  int ndeps)
{
  char buffer[2048];
  // Fix: start with an empty string.  With ndeps == 0 the loop writes
  // nothing and the original code printed an uninitialized buffer via %s
  // (undefined behavior).
  buffer[0] = '\0';
  char *progress = buffer;
  // Stop well before the end of the buffer so each sprintf has room.
  for (int i = 0; i < ndeps && progress < buffer + 2000; i++) {
    if (deps[i].dependence_type == ompt_dependence_type_source ||
        deps[i].dependence_type == ompt_dependence_type_sink)
      progress +=
          sprintf(progress, "(%" PRIu64 ", %s), ", deps[i].variable.value,
                  ompt_dependence_type_t_values[deps[i].dependence_type]);
    else
      progress +=
          sprintf(progress, "(%p, %s), ", deps[i].variable.ptr,
                  ompt_dependence_type_t_values[deps[i].dependence_type]);
  }
  // Strip the trailing ", " separator (present iff at least one dependence
  // was formatted).
  if (ndeps > 0)
    progress[-2] = 0;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_dependences: task_id=%" PRIu64
         ", deps=[%s], ndeps=%d\n",
         ompt_get_thread_data()->value, task_data->value, buffer, ndeps);
}
// Task-dependence callback: logs one predecessor/successor task pair.
static void
on_ompt_callback_task_dependence(
  ompt_data_t *first_task_data,
  ompt_data_t *second_task_data)
{
  uint64_t predecessor_id = first_task_data->value;
  uint64_t successor_id = second_task_data->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_task_dependence_pair: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, predecessor_id, successor_id);
}
// Thread-begin callback: assigns a fresh unique id to the new thread's
// tool data and logs the thread type (initial/worker/other).
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  // The runtime must hand us zero-initialized tool data.
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, ompt_thread_t_values[thread_type],
         thread_type, thread_data->value);
}
// Thread-end callback: logs the id of the thread that is shutting down.
static void
on_ompt_callback_thread_end(
  ompt_data_t *thread_data)
{
  uint64_t ending_thread = thread_data->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_thread_end: thread_id=%" PRIu64
         "\n",
         ompt_get_thread_data()->value, ending_thread);
}
// Control-tool callback: triggered by omp_control_tool().  Logs the command
// plus the current task frame, then (unless compiled for the OMPT tests)
// walks and prints the full task and parallel-region ancestry.
// Returns 0 to signal success to the runtime.
static int
on_ompt_callback_control_tool(
  uint64_t command,
  uint64_t modifier,
  void *arg,
  const void *codeptr_ra)
{
  ompt_frame_t* omptTaskFrame;
  // Level 0 = the currently executing task; only the frame is requested.
  ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_control_tool: command=%" PRIu64
         ", modifier=%" PRIu64
         ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, "
         "current_task_frame.reenter=%p \n",
         ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra,
         omptTaskFrame->exit_frame.ptr, omptTaskFrame->enter_frame.ptr);
  // the following would interfere with expected output for OMPT tests, so skip
#ifndef _OMPT_TESTS
  // print task data: walk ancestor tasks until the lookup fails.
  int task_level = 0;
  ompt_data_t *task_data;
  while (ompt_get_task_info(task_level, NULL, (ompt_data_t **)&task_data, NULL,
                            NULL, NULL)) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " task level %d: task_id=%" PRIu64 "\n",
           ompt_get_thread_data()->value, task_level, task_data->value);
    task_level++;
  }
  // print parallel data: walk enclosing parallel regions likewise.
  int parallel_level = 0;
  ompt_data_t *parallel_data;
  while (ompt_get_parallel_info(parallel_level, (ompt_data_t **)&parallel_data,
                                NULL)) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " parallel level %d: parallel_id=%" PRIu64
           "\n",
           ompt_get_thread_data()->value, parallel_level, parallel_data->value);
    parallel_level++;
  }
#endif
  return 0; //success
}
// Error-directive callback: logs a runtime error/warning raised by the
// OpenMP 'error' directive.
static void on_ompt_callback_error(ompt_severity_t severity,
                                   const char *message, size_t length,
                                   const void *codeptr_ra) {
  // Widen size_t to a fixed 64-bit value so the format stays portable.
  uint64_t message_length = (uint64_t)length;
  printf("%" PRIu64 ": ompt_event_runtime_error: severity=%" PRIu32
         ", message=%s, length=%" PRIu64 ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, severity, message, message_length,
         codeptr_ra);
}
// Tool initializer invoked by the OpenMP runtime at startup.
// Resolves the OMPT entry points through the runtime-provided lookup
// function, then registers all event callbacks this tool prints.
// Returns non-zero so the runtime keeps the tool active.
int ompt_initialize(
ompt_function_lookup_t lookup,
int initial_device_num,
ompt_data_t *tool_data)
{
// Resolve the runtime entry points used by the callbacks above.
ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback");
ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state");
ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
ompt_get_task_memory = (ompt_get_task_memory_t)lookup("ompt_get_task_memory");
ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
ompt_finalize_tool = (ompt_finalize_tool_t)lookup("ompt_finalize_tool");
// NOTE(review): presumably consumes one id so later ids line up with the
// expected test output — confirm against the test checks.
ompt_get_unique_id();
ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");
// Register the event callbacks. The *_t variants register a handler whose
// signature is shared with another event type.
register_ompt_callback(ompt_callback_mutex_acquire);
register_ompt_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
register_ompt_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
register_ompt_callback(ompt_callback_nest_lock);
register_ompt_callback(ompt_callback_sync_region);
register_ompt_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
register_ompt_callback_t(ompt_callback_reduction, ompt_callback_sync_region_t);
register_ompt_callback(ompt_callback_control_tool);
register_ompt_callback(ompt_callback_flush);
register_ompt_callback(ompt_callback_cancel);
register_ompt_callback(ompt_callback_implicit_task);
register_ompt_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
register_ompt_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
register_ompt_callback(ompt_callback_work);
register_ompt_callback(ompt_callback_masked);
register_ompt_callback(ompt_callback_parallel_begin);
register_ompt_callback(ompt_callback_parallel_end);
register_ompt_callback(ompt_callback_task_create);
register_ompt_callback(ompt_callback_task_schedule);
register_ompt_callback(ompt_callback_dependences);
register_ompt_callback(ompt_callback_task_dependence);
register_ompt_callback(ompt_callback_thread_begin);
register_ompt_callback(ompt_callback_thread_end);
register_ompt_callback(ompt_callback_error);
printf("0: NULL_POINTER=%p\n", (void*)NULL);
return 1; //success
}
// Tool finalizer: called once when the OpenMP runtime shuts down.
// tool_data is unused; the fixed string is presumably matched by the
// test harness output checks — confirm before changing it.
void ompt_finalize(ompt_data_t *tool_data)
{
printf("0: ompt_event_runtime_shutdown\n");
}
#ifdef __cplusplus
extern "C" {
#endif
// OMPT tool activation entry point, discovered by the OpenMP runtime.
// Returning a non-NULL result registers ompt_initialize/ompt_finalize.
ompt_start_tool_result_t* ompt_start_tool(
unsigned int omp_version,
const char *runtime_version)
{
// static: the address must remain valid after this function returns.
static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
return &ompt_start_tool_result;
}
#ifdef __cplusplus
}
#endif
#endif // ifndef USE_PRIVATE_TOOL
#ifdef _OMPT_TESTS
#undef _OMPT_TESTS
#endif
|
IndexedFaceMesh.h | #ifndef __INDEXEDFACEMESH_H__
#define __INDEXEDFACEMESH_H__
#include <vector>
#include <array>
#include "Common/Common.h"
#include <iterator>
namespace Utilities
{
// Triangle mesh stored as an indexed face set: a flat index array plus
// derived adjacency (vertex->faces, vertex->edges, face->edges), UVs and
// per-face / per-vertex normals.
class IndexedFaceMesh
{
public:
struct Edge
{
// assumes the up-to-two faces sharing this edge — TODO confirm
std::array<unsigned int, 2> m_face;
// assumes the two endpoint vertex indices — TODO confirm
std::array<unsigned int, 2> m_vert;
};
public:
typedef std::vector<unsigned int> Faces;
typedef std::vector<Vector3r> FaceNormals;
typedef std::vector<Vector3r> VertexNormals;
typedef std::vector<std::vector<unsigned int>> FacesEdges;
typedef std::vector<Edge> Edges;
typedef std::vector<std::vector<unsigned int>> VerticesEdges;
typedef std::vector<std::vector<unsigned int>> VerticesFaces;
typedef std::vector<unsigned int> UVIndices;
typedef std::vector<Vector2r> UVs;
protected:
// Number of mesh vertices (positions live outside this class).
unsigned int m_numPoints;
// Flat list of vertex indices, m_verticesPerFace entries per face.
Faces m_indices;
Edges m_edges;
// Per-face list of edge indices.
FacesEdges m_facesEdges;
bool m_closed;
UVIndices m_uvIndices;
UVs m_uvs;
// Per-vertex adjacency lists.
VerticesFaces m_verticesFaces;
VerticesEdges m_verticesEdges;
// Triangle meshes only.
const unsigned int m_verticesPerFace = 3u;
FaceNormals m_normals;
VertexNormals m_vertexNormals;
bool m_flatShading;
public:
IndexedFaceMesh();
IndexedFaceMesh(IndexedFaceMesh const& other);
IndexedFaceMesh& operator=(IndexedFaceMesh const& other);
~IndexedFaceMesh();
void release();
bool isClosed() const;
bool getFlatShading() const { return m_flatShading; }
void setFlatShading(const bool v) { m_flatShading = v; }
void initMesh(const unsigned int nPoints, const unsigned int nEdges, const unsigned int nFaces);
void addFace(const unsigned int * const indices);
void addFace(const int * const indices);
void addUV(const Real u, const Real v);
void addUVIndex(const unsigned int index);
const Faces& getFaces() const { return m_indices; }
Faces& getFaces(){ return m_indices; }
const FaceNormals& getFaceNormals() const { return m_normals; }
FaceNormals& getFaceNormals(){ return m_normals; }
const VertexNormals& getVertexNormals() const { return m_vertexNormals; }
VertexNormals& getVertexNormals(){ return m_vertexNormals; }
Edges& getEdges() { return m_edges; }
const Edges& getEdges() const { return m_edges; }
const FacesEdges& getFacesEdges() const { return m_facesEdges; }
const UVIndices& getUVIndices() const { return m_uvIndices; }
const UVs& getUVs() const { return m_uvs; }
const VerticesFaces& getVertexFaces() const { return m_verticesFaces; }
const VerticesEdges& getVertexEdges() const { return m_verticesEdges; }
unsigned int numVertices() const { return m_numPoints; }
// m_indices holds m_verticesPerFace entries per face.
unsigned int numFaces() const { return (unsigned int)m_indices.size() / m_verticesPerFace; }
unsigned int numEdges() const { return (unsigned int)m_edges.size(); }
unsigned int numUVs() const { return (unsigned int)m_uvs.size(); }
void copyUVs(const UVIndices& uvIndices, const UVs& uvs);
// Rebuilds the vertex/face/edge adjacency structures from m_indices.
void buildNeighbors();
template<class PositionData>
void updateNormals(const PositionData &pd, const unsigned int offset);
template<class PositionData>
void updateVertexNormals(const PositionData &pd);
unsigned int getVerticesPerFace() const;
};
// Recomputes per-face normals from vertex positions in pd, in parallel.
// offset is added to each stored vertex index when looking up positions.
// Normals are NOT unit cross products of degenerate faces; those are
// replaced by a fixed axis (see below).
template<class PositionData>
void IndexedFaceMesh::updateNormals(const PositionData &pd, const unsigned int offset)
{
m_normals.resize(numFaces());
#pragma omp parallel default(shared)
{
#pragma omp for schedule(static)
for (int i = 0; i < (int) numFaces(); i++)
{
// Get first three points of face
const Vector3r &a = pd.getPosition(m_indices[m_verticesPerFace*i] + offset);
const Vector3r &b = pd.getPosition(m_indices[m_verticesPerFace*i + 1] + offset);
const Vector3r &c = pd.getPosition(m_indices[m_verticesPerFace*i + 2] + offset);
// Create normal
Vector3r v1 = b - a;
Vector3r v2 = c - a;
m_normals[i] = v1.cross(v2);
m_normals[i].normalize();
// fix normals of degenerate triangles that can become zero vectors
if (m_normals[i].squaredNorm() < 1e-6f)
m_normals[i] = Vector3r::UnitX();
}
}
}
template<class PositionData>
void IndexedFaceMesh::updateVertexNormals(const PositionData &pd)
{
m_vertexNormals.resize(numVertices());
for (unsigned int i = 0; i < numVertices(); i++)
{
m_vertexNormals[i].setZero();
}
for (unsigned int i = 0u; i < numFaces(); i++)
{
const Vector3r &n = m_normals[i];
m_vertexNormals[m_indices[m_verticesPerFace*i]] += n;
m_vertexNormals[m_indices[m_verticesPerFace*i + 1]] += n;
m_vertexNormals[m_indices[m_verticesPerFace*i + 2]] += n;
}
for (unsigned int i = 0; i < numVertices(); i++)
{
m_vertexNormals[i].normalize();
}
}
}
#endif
|
openmp_async.c |
/**
*
* @file runtime_async.c
*
* @copyright 2012-2017 The University of Tennessee and The University of
* Tennessee Research Foundation. All rights reserved.
* @copyright 2012-2017 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria,
* Univ. Bordeaux. All rights reserved.
* @copyright 2018 King Abdullah University of Science and Technology (KAUST).
* All rights reserved.
*
* @brief AL4san OpenMP sequence source codes
*
* AL4SAN is a software package provided by King Abdullah University of Science and Technology (KAUST)
*
*
* @author Reazul Hoque
* @author Mathieu Faverge
* @date 2017-01-12
* @version 1.1.0
* @author Rabab Alomairy
* @date 2018-10-18
*/
#include <stdlib.h>
#include "al4san_openmp.h"
/*******************************************************************************
* Wait for the completion of a sequence
**/
// Blocks until all OpenMP tasks spawned so far have completed.
// Both parameters are unused in the OpenMP backend: taskwait is global,
// not per-sequence. Always returns AL4SAN_SUCCESS.
int AL4SAN_Openmp_sequence_wait( AL4SAN_context_t *al4san,
AL4SAN_sequence_t *sequence )
{
(void)al4san;
(void)sequence;
#pragma omp taskwait
return AL4SAN_SUCCESS;
}
/*******************************************************************************
* Terminate a sequence
**/
// Terminates a sequence: records the failing request and status on the
// sequence, then waits for outstanding tasks before returning.
void AL4SAN_Openmp_sequence_flush( AL4SAN_context_t *al4san,
AL4SAN_sequence_t *sequence,
AL4SAN_request_t *request,
int status)
{
(void)al4san;
sequence->request = request;
// Propagate the termination status to both sequence and request.
sequence->status = status;
request->status = status;
// Drain pending tasks so no task observes the sequence after flush.
#pragma omp taskwait
// #pragma omp flush
return;
}
|
keyring_fmt_plug.c | /* GNOME Keyring cracker patch for JtR. Hacked together during Monsoon of
* 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_keyring;
#elif FMT_REGISTERS_H
john_register_one(&fmt_keyring);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 32
#else
#define OMP_SCALE 64
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "arch.h"
//#undef _OPENMP
//#undef SIMD_COEF_32
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "md5.h"
#include "sha2.h"
#include "aes.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "keyring"
#define FORMAT_NAME "GNOME Keyring"
#define FORMAT_TAG "$keyring$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA256 AES " SHA256_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SALTLEN 8
typedef unsigned char guchar;
typedef unsigned int guint;
typedef int gint;
static struct fmt_tests keyring_tests[] = {
{"$keyring$db1b562e453a0764*3221*16*0*02b5c084e4802369c42507300f2e5e56", "openwall"},
{"$keyring$4f3f1557a7da17f5*2439*144*0*12215fabcff6782aa23605ab2cd843f7be9477b172b615eaa9130836f189d32ffda2e666747378f09c6e76ad817154daae83a36c0a0a35f991d40bcfcba3b7807ef57a0ce4c7f835bf34c6e358f0d66aa048d73dacaaaf6d7fa4b3510add6b88cc237000ff13cb4dbd132db33be3ea113bedeba80606f86662cc226af0dad789c703a7df5ad8700542e0f7a5e1f10cf0", "password"},
{NULL}
};
static struct custom_salt {
unsigned int iterations;
unsigned char salt[SALTLEN];
unsigned int crypto_size;
unsigned int inlined;
unsigned char ct[LINE_BUFFER_SIZE / 2]; /* after hex conversion */
} *cur_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static int any_cracked;
static size_t cracked_size;
// Format init: scales the keys-per-crypt limits by thread count (and
// OMP_SCALE for the maximum) and allocates the per-candidate key and
// result buffers. Called once by the JtR core before cracking.
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
int omp_t;
omp_t = omp_get_max_threads();
// min scales by thread count only; max gets the extra OMP_SCALE factor
// to amortize per-batch overhead.
self->params.min_keys_per_crypt *= omp_t;
self->params.max_keys_per_crypt *= omp_t * OMP_SCALE;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
any_cracked = 0;
cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
cracked = mem_calloc(cracked_size, 1);
}
// Releases the buffers allocated in init(); MEM_FREE also NULLs the
// pointers, guarding against double free on repeated teardown.
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
/*
 * Sanity-check that p is a non-empty string of at most 9 decimal digits,
 * so a later atoi() cannot overflow a 32-bit int (avoids atoi's undefined
 * behavior on out-of-range input).
 *
 * Returns 1 if p is a safe non-negative integer literal, 0 otherwise.
 * Fix: the empty string is now rejected — previously "" passed the check
 * and was handed to atoi().
 */
static int looks_like_nice_int(char *p)
{
	// reasonability check + avoids atoi's UB
	if (*p == '\0' || strlen(p) > 9)
		return 0;
	for (; *p; p++)
		if (*p < '0' || *p > '9')
			return 0;
	return 1;
}
// Validates a "$keyring$salt*iters*ctlen*inlined*ct" hash line.
// Returns 1 if the line is structurally sound, 0 otherwise.
// Works on a strdup'd copy because strtokm mutates its input.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int ctlen, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
if (keeptr == NULL)
goto err;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "*")) == NULL) /* salt */
goto err;
// hexlenl checks length and hex-ness; extra flags trailing garbage.
if (hexlenl(p, &extra) != SALTLEN * 2 || extra)
goto err;
// NOTE(review): this digit-by-digit atoi16 scan looks redundant with the
// hexlenl check above — confirm before removing.
while (*p)
if (atoi16[ARCH_INDEX(*p++)] == 0x7f)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
if (!looks_like_nice_int(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* crypto size */
goto err;
if (!looks_like_nice_int(p))
goto err;
ctlen = atoi(p);
// NOTE(review): signed/unsigned comparison (ctlen is int, sizeof is
// size_t); ctlen is non-negative here thanks to looks_like_nice_int,
// so this is safe — but worth an explicit cast.
if (ctlen > sizeof(cur_salt->ct))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* inlined - unused? TODO */
goto err;
if (!looks_like_nice_int(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* ciphertext */
goto err;
if (ctlen > LINE_BUFFER_SIZE)
goto err;
// Ciphertext hex must encode exactly ctlen bytes.
if (hexlenl(p, &extra) != ctlen * 2 || extra)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
// Parses a validated hash line into a custom_salt. Returns a pointer to a
// static struct (the JtR core copies SALT_SIZE bytes out of it).
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN; /* skip over "$keyring$" */
// NOTE(review): this allocation is immediately overwritten when set_salt()
// runs and appears unused — looks like a leftover / one-time leak; confirm.
cur_salt = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
p = strtokm(ctcopy, "*");
// Decode the hex salt (two hex chars per byte).
for (i = 0; i < SALTLEN; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.iterations = atoi(p);
p = strtokm(NULL, "*");
cs.crypto_size = atoi(p);
p = strtokm(NULL, "*");
cs.inlined = atoi(p);
p = strtokm(NULL, "*");
// Decode the hex ciphertext payload.
for (i = 0; i < cs.crypto_size; i++)
cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
// Installs the salt the core selected for the next crypt_all() batch.
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
#ifdef SIMD_COEF_32
// SIMD variant: derives AES key+IV for MAX_KEYS_PER_CRYPT candidates at
// once. Scheme: digest = SHA256(password||salt), then iterate
// digest = SHA256(digest) (iterations-1) times; key = digest[0..15],
// iv = digest[16..31].
static void symkey_generate_simple(int index, unsigned char *salt, int n_salt, int iterations,
unsigned char key[MAX_KEYS_PER_CRYPT][32],
unsigned char iv[MAX_KEYS_PER_CRYPT][32])
{
SHA256_CTX ctx;
unsigned char digest[32], _IBuf[64*MAX_KEYS_PER_CRYPT+MEM_ALIGN_SIMD], *keys;
uint32_t *keys32;
unsigned int i, j;
// SIMD buffers must be aligned; carve an aligned region out of _IBuf.
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
memset(keys, 0, 64*MAX_KEYS_PER_CRYPT);
keys32 = (uint32_t*)keys;
// use oSSL to do first crypt, and marshal into SIMD buffers.
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));
SHA256_Update(&ctx, salt, n_salt);
SHA256_Final(digest, &ctx);
for (j = 0; j < 32; ++j)
keys[GETPOS(j, i)] = digest[j];
// j == 32 here: append the SHA-256 padding byte right after the digest.
keys[GETPOS(j, i)] = 0x80;
// 32 bytes is 256 bits (0x100, simply put a 1 into offset 62)
keys[GETPOS(62, i)] = 1;
}
// the 'simple' inner loop in SIMD.
for (i = 1; i < iterations; ++i)
SIMDSHA256body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
// marshal data back into flat buffers.
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
uint32_t *Optr32 = (uint32_t*)(key[i]);
uint32_t *Iptr32 = &keys32[(i/SIMD_COEF_32)*SIMD_COEF_32*16 + (i%SIMD_COEF_32)];
// First 16 bytes -> key, next 16 -> iv; JOHNSWAP fixes endianness.
for (j = 0; j < 4; ++j)
Optr32[j] = JOHNSWAP(Iptr32[j*SIMD_COEF_32]);
Optr32 = (uint32_t*)(iv[i]);
for (j = 0; j < 4; ++j)
Optr32[j] = JOHNSWAP(Iptr32[(j+4)*SIMD_COEF_32]);
}
}
#else
// Scalar variant (MAX_KEYS_PER_CRYPT == 1): derive AES key+IV as
// digest = SHA256(password||salt) iterated (iterations-1) more times;
// key = digest[0..15], iv = digest[16..31] (AES-128 key and CBC IV).
static void symkey_generate_simple(int index, unsigned char *salt, int n_salt, int iterations,
unsigned char key[MAX_KEYS_PER_CRYPT][32],
unsigned char iv[MAX_KEYS_PER_CRYPT][32])
{
SHA256_CTX ctx;
unsigned char digest[32];
int i;
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index], strlen(saved_key[index]));
SHA256_Update(&ctx, salt, n_salt);
SHA256_Final(digest, &ctx);
// Starts at 1: the initial hash above counts as iteration one.
for (i = 1; i < iterations; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, digest, 32);
SHA256_Final(digest, &ctx);
}
memcpy(key[0], digest, 16);
memcpy(iv[0], &digest[16], 16);
}
#endif
// Derives per-candidate AES-128 key/IV from cur_salt and decrypts the
// keyring ciphertext (CBC mode) into buffers, one buffer per candidate
// starting at saved_key[index].
static void decrypt_buffer(unsigned char buffers[MAX_KEYS_PER_CRYPT][sizeof(cur_salt->ct)], int index)
{
unsigned char key[MAX_KEYS_PER_CRYPT][32];
unsigned char iv[MAX_KEYS_PER_CRYPT][32];
AES_KEY akey;
unsigned int i, len = cur_salt->crypto_size;
unsigned char *salt = cur_salt->salt;
int iterations = cur_salt->iterations;
symkey_generate_simple(index, salt, 8, iterations, key, iv);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
memset(&akey, 0, sizeof(AES_KEY));
// Only the first 128 bits of key[i] are used.
if (AES_set_decrypt_key(key[i], 128, &akey) < 0) {
fprintf(stderr, "AES_set_decrypt_key failed!\n");
}
// AES_cbc_encrypt consumes iv[i] in-place; safe since iv is per-call.
AES_cbc_encrypt(cur_salt->ct, buffers[i], len, &akey, iv[i], AES_DECRYPT);
}
}
/* Check the keyring integrity tag: in a correctly decrypted buffer the
 * first 16 bytes are the MD5 digest of everything that follows.
 * Returns 1 on a match (candidate password is correct), 0 otherwise. */
static int verify_decrypted_buffer(unsigned char *buffer, int len)
{
	MD5_CTX md5;
	guchar computed[16];

	MD5_Init(&md5);
	MD5_Update(&md5, buffer + 16, len - 16);
	MD5_Final(computed, &md5);

	return !memcmp(computed, buffer, 16);
}
// Main cracking loop: for each batch of MAX_KEYS_PER_CRYPT candidates,
// derive keys, decrypt the keyring blob and mark matches in cracked[].
// Parallelized over batches with OpenMP; returns the candidate count.
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
// Reset results from the previous salt/batch.
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT)
{
int i;
unsigned char (*buffers)[sizeof(cur_salt->ct)];
// This is too big to be on stack. See #1292.
buffers = mem_alloc(MAX_KEYS_PER_CRYPT * sizeof(*buffers));
decrypt_buffer(buffers, index);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
if (verify_decrypted_buffer(buffers[i], cur_salt->crypto_size)) {
cracked[index+i] = 1;
// Multiple threads may set the shared flag concurrently.
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
MEM_FREE(buffers);
}
return count;
}
// Any candidate cracked in the last crypt_all() batch? (binary unused:
// this format verifies via the decrypted MD5 tag, not a stored binary.)
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
// Was the candidate at this index cracked?
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
// Final confirmation; crypt_all() already fully verified, so reuse the flag.
static int cmp_exact(char *source, int index)
{
return cracked[index];
}
static void keyring_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
// Returns the stored candidate password for this index (NUL-terminated).
static char *get_key(int index)
{
return saved_key[index];
}
/* Tunable-cost hook: report the salt's SHA-256 iteration count. */
static unsigned int iteration_count(void *salt)
{
	return ((struct custom_salt *)salt)->iterations;
}
// Format descriptor registered with the JtR core: static parameters
// first, then the method table wiring up the functions above.
struct fmt_main fmt_keyring = {
{
/* format parameters */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
/* tunable cost names, parallel to the cost functions below */
"iteration count",
},
{ FORMAT_TAG },
keyring_tests
}, {
/* method table */
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
/* no binary, so no real binary hash functions */
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
keyring_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
/// Kind of the directive.
OpenMPDirectiveKind Kind;
/// Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// Ending location of the directive.
SourceLocation EndLoc;
/// Numbers of clauses.
const unsigned NumClauses;
/// Number of child expressions/stmts.
const unsigned NumChildren;
/// Offset from this to the start of clauses.
/// There are NumClauses pointers to clauses, they are followed by
/// NumChildren pointers to child stmts/exprs (if the directive type
/// requires an associated stmt, then it has to be the first of them).
const unsigned ClausesOffset;
/// Get the clauses storage.
/// Clause pointers are stored in trailing storage right after the
/// (derived) directive object itself, at ClausesOffset bytes from this.
MutableArrayRef<OMPClause *> getClauses() {
OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
reinterpret_cast<char *>(this) + ClausesOffset);
return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
}
protected:
/// Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
template <typename T>
OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses, unsigned NumChildren)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
NumChildren(NumChildren),
ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {}
/// Sets the list of variables for this clause.
///
/// \param Clauses The list of clauses for the directive.
///
void setClauses(ArrayRef<OMPClause *> Clauses);
/// Set the associated statement for the directive.
///
/// /param S Associated statement.
///
void setAssociatedStmt(Stmt *S) {
assert(hasAssociatedStmt() && "no associated statement.");
*child_begin() = S;
}
public:
/// Iterates over expressions/statements used in the construct.
class used_clauses_child_iterator
: public llvm::iterator_adaptor_base<
used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
ArrayRef<OMPClause *>::iterator End;
OMPClause::child_iterator ChildI, ChildEnd;
/// Advance to the next clause that has at least one used child;
/// no-op if the current clause still has children left.
void MoveToNext() {
if (ChildI != ChildEnd)
return;
while (this->I != End) {
++this->I;
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
if (ChildI != ChildEnd)
return;
}
}
}
public:
explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
: used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
End(Clauses.end()) {
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
MoveToNext();
}
}
Stmt *operator*() const { return *ChildI; }
Stmt *operator->() const { return **this; }
used_clauses_child_iterator &operator++() {
++ChildI;
if (ChildI != ChildEnd)
return *this;
if (this->I != End) {
++this->I;
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
}
}
MoveToNext();
return *this;
}
};
/// Returns an iterator range over all child stmts/exprs used by the
/// given clauses.
static llvm::iterator_range<used_clauses_child_iterator>
used_clauses_children(ArrayRef<OMPClause *> Clauses) {
return {used_clauses_child_iterator(Clauses),
used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
}
/// Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only clauses of type SpecificClause.
template <typename SpecificClause>
class specific_clause_iterator
: public llvm::iterator_adaptor_base<
specific_clause_iterator<SpecificClause>,
ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
const SpecificClause *, ptrdiff_t, const SpecificClause *,
const SpecificClause *> {
ArrayRef<OMPClause *>::const_iterator End;
/// Skip clauses that are not of type SpecificClause.
void SkipToNextClause() {
while (this->I != End && !isa<SpecificClause>(*this->I))
++this->I;
}
public:
explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
: specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
End(Clauses.end()) {
SkipToNextClause();
}
const SpecificClause *operator*() const {
return cast<SpecificClause>(*this->I);
}
const SpecificClause *operator->() const { return **this; }
specific_clause_iterator &operator++() {
++this->I;
SkipToNextClause();
return *this;
}
};
/// Returns a range visiting only the clauses of kind SpecificClause
/// in the given clause list.
template <typename SpecificClause>
static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
return {specific_clause_iterator<SpecificClause>(Clauses),
specific_clause_iterator<SpecificClause>(
llvm::makeArrayRef(Clauses.end(), 0))};
}
template <typename SpecificClause>
llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind() const {
return getClausesOfKind<SpecificClause>(clauses());
}
/// Gets a single clause of the specified kind associated with the
/// current directive iff there is only one clause of this kind (and assertion
/// is fired if there is more than one clause is associated with the
/// directive). Returns nullptr if no clause of this kind is associated with
/// the directive.
template <typename SpecificClause>
const SpecificClause *getSingleClause() const {
auto Clauses = getClausesOfKind<SpecificClause>();
if (Clauses.begin() != Clauses.end()) {
assert(std::next(Clauses.begin()) == Clauses.end() &&
"There are at least 2 clauses of the specified kind");
return *Clauses.begin();
}
return nullptr;
}
/// Returns true if the current directive has one or more clauses of a
/// specific kind.
template <typename SpecificClause>
bool hasClausesOfKind() const {
auto Clauses = getClausesOfKind<SpecificClause>();
return Clauses.begin() != Clauses.end();
}
/// Returns starting location of directive kind.
SourceLocation getBeginLoc() const { return StartLoc; }
/// Returns ending location of directive.
SourceLocation getEndLoc() const { return EndLoc; }
/// Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// Get number of clauses.
unsigned getNumClauses() const { return NumClauses; }
/// Returns specified clause.
///
/// \param i Number of clause.
///
OMPClause *getClause(unsigned i) const { return clauses()[i]; }
/// Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return NumChildren > 0; }
/// Returns statement associated with the directive.
const Stmt *getAssociatedStmt() const {
assert(hasAssociatedStmt() && "no associated statement.");
return *child_begin();
}
Stmt *getAssociatedStmt() {
assert(hasAssociatedStmt() && "no associated statement.");
return *child_begin();
}
/// Returns the captured statement associated with the
/// component region within the (combined) directive.
//
// \param RegionKind Component region kind.
const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
assert(std::any_of(
CaptureRegions.begin(), CaptureRegions.end(),
[=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
"RegionKind not found in OpenMP CaptureRegions.");
auto *CS = cast<CapturedStmt>(getAssociatedStmt());
// Capture regions are nested; peel one CapturedStmt per region until
// the requested kind is reached.
for (auto ThisCaptureRegion : CaptureRegions) {
if (ThisCaptureRegion == RegionKind)
return CS;
CS = cast<CapturedStmt>(CS->getCapturedStmt());
}
llvm_unreachable("Incorrect RegionKind specified for directive.");
}
/// Get innermost captured statement for the construct.
CapturedStmt *getInnermostCapturedStmt() {
assert(hasAssociatedStmt() && getAssociatedStmt() &&
"Must have associated statement.");
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
assert(!CaptureRegions.empty() &&
"At least one captured statement must be provided.");
auto *CS = cast<CapturedStmt>(getAssociatedStmt());
for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
CS = cast<CapturedStmt>(CS->getCapturedStmt());
return CS;
}
const CapturedStmt *getInnermostCapturedStmt() const {
return const_cast<OMPExecutableDirective *>(this)
->getInnermostCapturedStmt();
}
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
/// Support for LLVM-style RTTI: any statement class in the OpenMP
/// executable-directive range is an OMPExecutableDirective.
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
child_range children() {
if (!hasAssociatedStmt())
return child_range(child_iterator(), child_iterator());
// Child storage follows the clause storage in trailing memory.
Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
/// Do not mark all the special expression/statements as children, except
/// for the associated statement.
return child_range(ChildStorage, ChildStorage + 1);
}
const_child_range children() const {
if (!hasAssociatedStmt())
return const_child_range(const_child_iterator(), const_child_iterator());
Stmt **ChildStorage = reinterpret_cast<Stmt **>(
const_cast<OMPExecutableDirective *>(this)->getClauses().end());
return const_child_range(ChildStorage, ChildStorage + 1);
}
ArrayRef<OMPClause *> clauses() { return getClauses(); }
ArrayRef<OMPClause *> clauses() const {
return const_cast<OMPExecutableDirective *>(this)->getClauses();
}
/// Returns whether or not this is a Standalone directive.
///
/// Stand-alone directives are executable directives
/// that have no associated user code.
bool isStandaloneDirective() const;
/// Returns the AST node representing OpenMP structured-block of this
/// OpenMP executable directive,
/// Prerequisite: Executable Directive must not be Standalone directive.
const Stmt *getStructuredBlock() const;
Stmt *getStructuredBlock() {
return const_cast<Stmt *>(
const_cast<const OMPExecutableDirective *>(this)->getStructuredBlock());
}
};
/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// true if the construct has inner cancel directive.
bool HasCancel;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending Location of the directive.
///
OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
StartLoc, EndLoc, NumClauses, 1),
HasCancel(false) {}
/// Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
SourceLocation(), SourceLocation(), NumClauses,
1),
HasCancel(false) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
/// Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelDirectiveClass;
}
};
/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Number of collapsed loops as specified by 'collapse' clause.
  unsigned CollapsedNum;
  /// Offsets to the stored exprs.
  /// This enumeration contains offsets to all the pointers to children
  /// expressions stored in OMPLoopDirective.
  /// The first 9 children are necessary for all the loop directives,
  /// the next 8 are specific to the worksharing ones, and the next 13 are
  /// used for combined constructs containing two pragmas associated to loops.
  /// After the fixed children, eight arrays of length CollapsedNum are
  /// allocated: loop counters, private counters, their inits, updates, final
  /// values, dependent counters, dependent inits and finals conditions.
  /// PrevLowerBound and PrevUpperBound are used to communicate blocking
  /// information in composite constructs which require loop blocking
  /// DistInc is used to generate the increment expression for the distribute
  /// loop when combined with a further nested loop
  /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
  /// for loop when combined with a previous distribute loop in the same pragma
  /// (e.g. 'distribute parallel for')
  ///
  enum {
    AssociatedStmtOffset = 0,
    IterationVariableOffset = 1,
    LastIterationOffset = 2,
    CalcLastIterationOffset = 3,
    PreConditionOffset = 4,
    CondOffset = 5,
    InitOffset = 6,
    IncOffset = 7,
    PreInitsOffset = 8,
    // The '...End' enumerators do not correspond to child expressions - they
    // specify the offset to the end (and start of the following counters/
    // updates/finals/dependent_counters/dependent_inits/finals_conditions
    // arrays).
    DefaultEnd = 9,
    // The following 8 exprs are used by worksharing and distribute loops only.
    IsLastIterVariableOffset = 9,
    LowerBoundVariableOffset = 10,
    UpperBoundVariableOffset = 11,
    StrideVariableOffset = 12,
    EnsureUpperBoundOffset = 13,
    NextLowerBoundOffset = 14,
    NextUpperBoundOffset = 15,
    NumIterationsOffset = 16,
    // Offset to the end for worksharing loop directives.
    WorksharingEnd = 17,
    PrevLowerBoundVariableOffset = 17,
    PrevUpperBoundVariableOffset = 18,
    DistIncOffset = 19,
    PrevEnsureUpperBoundOffset = 20,
    CombinedLowerBoundVariableOffset = 21,
    CombinedUpperBoundVariableOffset = 22,
    CombinedEnsureUpperBoundOffset = 23,
    CombinedInitOffset = 24,
    CombinedConditionOffset = 25,
    CombinedNextLowerBoundOffset = 26,
    CombinedNextUpperBoundOffset = 27,
    CombinedDistConditionOffset = 28,
    CombinedParForInDistConditionOffset = 29,
    // Offset to the end (and start of the following
    // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions
    // arrays) for combined distribute loop directives.
    CombinedDistributeEnd = 30,
  };
  // The following accessors reinterpret the Stmt* child slots past the fixed
  // offsets as arrays of Expr*, each of length CollapsedNum.
  /// Get the counters storage.
  MutableArrayRef<Expr *> getCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// Get the private counters storage.
  MutableArrayRef<Expr *> getPrivateCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(&*std::next(
        child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// Get the inits storage.
  MutableArrayRef<Expr *> getInits() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// Get the dependent counters storage.
  MutableArrayRef<Expr *> getDependentCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 5 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// Get the dependent inits storage.
  MutableArrayRef<Expr *> getDependentInits() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 6 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// Get the finals conditions storage.
  MutableArrayRef<Expr *> getFinalsConditions() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 7 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
protected:
  /// Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
  /// \param NumClauses Number of clauses.
  /// \param NumSpecialChildren Number of additional directive-specific stmts.
  ///
  template <typename T>
  OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
                   SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses,
                   unsigned NumSpecialChildren = 0)
      : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
                               numLoopChildren(CollapsedNum, Kind) +
                                   NumSpecialChildren),
        CollapsedNum(CollapsedNum) {}
  /// Offset to the start of children expression arrays.
  static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
    if (isOpenMPLoopBoundSharingDirective(Kind))
      return CombinedDistributeEnd;
    if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
        isOpenMPDistributeDirective(Kind))
      return WorksharingEnd;
    return DefaultEnd;
  }
  /// Children number.
  static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) +
           8 * CollapsedNum; // Counters, PrivateCounters, Inits,
                             // Updates, Finals, DependentCounters,
                             // DependentInits, FinalsConditions.
  }
  // Setters below are used by Sema and ASTStmtReader to fill in the helper
  // expression child slots; asserting setters are only valid for the
  // directive kinds that actually allocate the corresponding slot.
  void setIterationVariable(Expr *IV) {
    *std::next(child_begin(), IterationVariableOffset) = IV;
  }
  void setLastIteration(Expr *LI) {
    *std::next(child_begin(), LastIterationOffset) = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    *std::next(child_begin(), CalcLastIterationOffset) = CLI;
  }
  void setPreCond(Expr *PC) {
    *std::next(child_begin(), PreConditionOffset) = PC;
  }
  void setCond(Expr *Cond) {
    *std::next(child_begin(), CondOffset) = Cond;
  }
  void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
  void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
  void setPreInits(Stmt *PreInits) {
    *std::next(child_begin(), PreInitsOffset) = PreInits;
  }
  void setIsLastIterVariable(Expr *IL) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), IsLastIterVariableOffset) = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), LowerBoundVariableOffset) = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), UpperBoundVariableOffset) = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), StrideVariableOffset) = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextLowerBoundOffset) = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextUpperBoundOffset) = NUB;
  }
  void setNumIterations(Expr *NI) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NumIterationsOffset) = NI;
  }
  void setPrevLowerBoundVariable(Expr *PrevLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
  }
  void setPrevUpperBoundVariable(Expr *PrevUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
  }
  void setDistInc(Expr *DistInc) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), DistIncOffset) = DistInc;
  }
  void setPrevEnsureUpperBound(Expr *PrevEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
  }
  void setCombinedLowerBoundVariable(Expr *CombLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB;
  }
  void setCombinedUpperBoundVariable(Expr *CombUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB;
  }
  void setCombinedEnsureUpperBound(Expr *CombEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB;
  }
  void setCombinedInit(Expr *CombInit) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedInitOffset) = CombInit;
  }
  void setCombinedCond(Expr *CombCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedConditionOffset) = CombCond;
  }
  void setCombinedNextLowerBound(Expr *CombNLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB;
  }
  void setCombinedNextUpperBound(Expr *CombNUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB;
  }
  void setCombinedDistCond(Expr *CombDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond;
  }
  void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    *std::next(child_begin(),
               CombinedParForInDistConditionOffset) = CombParForInDistCond;
  }
  // Bulk setters for the trailing per-collapsed-loop expression arrays
  // (defined out of line).
  void setCounters(ArrayRef<Expr *> A);
  void setPrivateCounters(ArrayRef<Expr *> A);
  void setInits(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);
  void setDependentCounters(ArrayRef<Expr *> A);
  void setDependentInits(ArrayRef<Expr *> A);
  void setFinalsConditions(ArrayRef<Expr *> A);
public:
  /// The expressions built to support OpenMP loops in combined/composite
  /// pragmas (e.g. pragma omp distribute parallel for)
  struct DistCombinedHelperExprs {
    /// DistributeLowerBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *LB;
    /// DistributeUpperBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *UB;
    /// DistributeEnsureUpperBound - used when composing 'omp distribute'
    /// with 'omp for' in a same construct, EUB depends on DistUB
    Expr *EUB;
    /// Distribute loop iteration variable init used when composing 'omp
    /// distribute'
    /// with 'omp for' in a same construct
    Expr *Init;
    /// Distribute Loop condition used when composing 'omp distribute'
    /// with 'omp for' in a same construct
    Expr *Cond;
    /// Update of LowerBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NLB;
    /// Update of UpperBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NUB;
    /// Distribute Loop condition used when composing 'omp distribute'
    /// with 'omp for' in a same construct when schedule is chunked.
    Expr *DistCond;
    /// 'omp parallel for' loop condition used when composed with
    /// 'omp distribute' in the same construct and when schedule is
    /// chunked and the chunk size is 1.
    Expr *ParForInDistCond;
  };
  /// The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// Loop iteration variable.
    Expr *IterationVarRef;
    /// Loop last iteration number.
    Expr *LastIteration;
    /// Loop number of iterations.
    Expr *NumIterations;
    /// Calculation of last iteration.
    Expr *CalcLastIteration;
    /// Loop pre-condition.
    Expr *PreCond;
    /// Loop condition.
    Expr *Cond;
    /// Loop iteration variable init.
    Expr *Init;
    /// Loop increment.
    Expr *Inc;
    /// IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// LowerBound - local variable passed to runtime.
    Expr *LB;
    /// UpperBound - local variable passed to runtime.
    Expr *UB;
    /// Stride - local variable passed to runtime.
    Expr *ST;
    /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// Final loop counter values for CodeGen.
    SmallVector<Expr *, 4> Finals;
    /// List of counters required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentCounters;
    /// List of initializers required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentInits;
    /// List of final conditions required for the generation of the
    /// non-rectangular loops.
    SmallVector<Expr *, 4> FinalsConditions;
    /// Init statement for all captured expressions.
    Stmt *PreInits;
    /// Expressions used when combining OpenMP loop pragmas
    DistCombinedHelperExprs DistCombinedFields;
    /// Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }
    /// Initialize all the fields to null.
    /// \param Size Number of elements in the
    /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
    /// arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      DependentCounters.resize(Size);
      DependentInits.resize(Size);
      FinalsConditions.resize(Size);
      for (unsigned i = 0; i < Size; ++i) {
        Counters[i] = nullptr;
        PrivateCounters[i] = nullptr;
        Inits[i] = nullptr;
        Updates[i] = nullptr;
        Finals[i] = nullptr;
        DependentCounters[i] = nullptr;
        DependentInits[i] = nullptr;
        FinalsConditions[i] = nullptr;
      }
      PreInits = nullptr;
      DistCombinedFields.LB = nullptr;
      DistCombinedFields.UB = nullptr;
      DistCombinedFields.EUB = nullptr;
      DistCombinedFields.Init = nullptr;
      DistCombinedFields.Cond = nullptr;
      DistCombinedFields.NLB = nullptr;
      DistCombinedFields.NUB = nullptr;
      DistCombinedFields.DistCond = nullptr;
      DistCombinedFields.ParForInDistCond = nullptr;
    }
  };
  /// Get number of collapsed loops.
  unsigned getCollapsedNumber() const { return CollapsedNum; }
  // Getters below read back the helper expression child slots set by Sema;
  // asserting getters are only valid for the directive kinds that actually
  // allocate the corresponding slot.
  /// Get the loop iteration variable.
  Expr *getIterationVariable() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IterationVariableOffset)));
  }
  /// Get the loop last iteration number.
  Expr *getLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LastIterationOffset)));
  }
  /// Get the calculation of the last iteration.
  Expr *getCalcLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CalcLastIterationOffset)));
  }
  /// Get the loop pre-condition.
  Expr *getPreCond() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PreConditionOffset)));
  }
  /// Get the loop condition.
  Expr *getCond() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
  }
  /// Get the loop iteration variable init.
  Expr *getInit() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
  }
  /// Get the loop increment.
  Expr *getInc() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
  }
  /// Get the init statement for all captured expressions.
  const Stmt *getPreInits() const {
    return *std::next(child_begin(), PreInitsOffset);
  }
  Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); }
  Expr *getIsLastIterVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IsLastIterVariableOffset)));
  }
  Expr *getLowerBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LowerBoundVariableOffset)));
  }
  Expr *getUpperBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), UpperBoundVariableOffset)));
  }
  Expr *getStrideVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), StrideVariableOffset)));
  }
  Expr *getEnsureUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), EnsureUpperBoundOffset)));
  }
  Expr *getNextLowerBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextLowerBoundOffset)));
  }
  Expr *getNextUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextUpperBoundOffset)));
  }
  Expr *getNumIterations() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NumIterationsOffset)));
  }
  Expr *getPrevLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevLowerBoundVariableOffset)));
  }
  Expr *getPrevUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevUpperBoundVariableOffset)));
  }
  Expr *getDistInc() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), DistIncOffset)));
  }
  Expr *getPrevEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevEnsureUpperBoundOffset)));
  }
  Expr *getCombinedLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedLowerBoundVariableOffset)));
  }
  Expr *getCombinedUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedUpperBoundVariableOffset)));
  }
  Expr *getCombinedEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedEnsureUpperBoundOffset)));
  }
  Expr *getCombinedInit() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedInitOffset)));
  }
  Expr *getCombinedCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedConditionOffset)));
  }
  Expr *getCombinedNextLowerBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextLowerBoundOffset)));
  }
  Expr *getCombinedNextUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextUpperBoundOffset)));
  }
  Expr *getCombinedDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedDistConditionOffset)));
  }
  Expr *getCombinedParForInDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedParForInDistConditionOffset)));
  }
  /// Get the body of the innermost collapsed loop, peeling off the
  /// CollapsedNum enclosing for/range-for statements.
  const Stmt *getBody() const {
    // This relies on the loop form is already checked by Sema.
    const Stmt *Body =
        getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
    if (auto *For = dyn_cast<ForStmt>(Body)) {
      Body = For->getBody();
    } else {
      assert(isa<CXXForRangeStmt>(Body) &&
             "Expected canonical for loop or range-based for loop.");
      Body = cast<CXXForRangeStmt>(Body)->getBody();
    }
    for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
      Body = Body->IgnoreContainers();
      if (auto *For = dyn_cast<ForStmt>(Body)) {
        Body = For->getBody();
      } else {
        assert(isa<CXXForRangeStmt>(Body) &&
               "Expected canonical for loop or range-based for loop.");
        Body = cast<CXXForRangeStmt>(Body)->getBody();
      }
    }
    return Body;
  }
  // Read-only views over the per-collapsed-loop expression arrays.
  ArrayRef<Expr *> counters() { return getCounters(); }
  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }
  ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
  ArrayRef<Expr *> private_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
  }
  ArrayRef<Expr *> inits() { return getInits(); }
  ArrayRef<Expr *> inits() const {
    return const_cast<OMPLoopDirective *>(this)->getInits();
  }
  ArrayRef<Expr *> updates() { return getUpdates(); }
  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }
  ArrayRef<Expr *> finals() { return getFinals(); }
  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }
  ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }
  ArrayRef<Expr *> dependent_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
  }
  ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }
  ArrayRef<Expr *> dependent_inits() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentInits();
  }
  ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }
  ArrayRef<Expr *> finals_conditions() const {
    return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
  }
  /// Support for LLVM-style RTTI: matches every loop-based OpenMP directive
  /// class derived from OMPLoopDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
           T->getStmtClass() ==
               OMPTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);
  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
// The AST statement reader uses the private constructors and setter below.
friend class ASTStmtReader;
/// true if current directive has inner cancel directive.
bool HasCancel;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
CollapsedNum, NumClauses),
HasCancel(false) {}
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses),
HasCancel(false) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs,
bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForDirectiveClass;
}
};
/// This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
StartLoc, EndLoc, CollapsedNum, NumClauses) {}
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForSimdDirectiveClass;
}
};
/// This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors and setter below.
friend class ASTStmtReader;
/// true if current directive has inner cancel directive.
bool HasCancel;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
StartLoc, EndLoc, NumClauses, 1), // one child slot: the associated statement
HasCancel(false) {}
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param NumClauses Number of clauses.
///
explicit OMPSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
SourceLocation(), SourceLocation(), NumClauses,
1),
HasCancel(false) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionsDirectiveClass;
}
};
/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// true if current directive has inner cancel directive.
bool HasCancel;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
StartLoc, EndLoc, 0, 1), // no clauses; one child slot for the associated statement
HasCancel(false) {}
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
explicit OMPSectionDirective()
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
SourceLocation(), SourceLocation(), 0, 1),
HasCancel(false) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPSectionDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt, bool HasCancel);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionDirectiveClass;
}
};
/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
StartLoc, EndLoc, NumClauses, 1) {} // one child slot: the associated statement
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param NumClauses Number of clauses.
///
explicit OMPSingleDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSingleDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSingleDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSingleDirectiveClass;
}
};
/// This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
StartLoc, EndLoc, 0, 1) {} // no clauses; one child slot for the associated statement
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
explicit OMPMasterDirective()
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPMasterDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterDirectiveClass;
}
};
/// This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors and setter below.
friend class ASTStmtReader;
/// Name of the directive.
DeclarationNameInfo DirName;
/// Build directive with the given start and end location.
///
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned NumClauses)
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
StartLoc, EndLoc, NumClauses, 1), // one child slot: the associated statement
DirName(Name) {}
/// Build an empty directive (invalid locations, empty name; used via
/// CreateEmpty).
///
/// \param NumClauses Number of clauses.
///
explicit OMPCriticalDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
SourceLocation(), SourceLocation(), NumClauses,
1),
DirName() {}
/// Set name of the directive.
///
/// \param Name Name of the directive.
///
void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
/// Creates directive.
///
/// \param C AST context.
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPCriticalDirective *
Create(const ASTContext &C, const DeclarationNameInfo &Name,
SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Return name of the directive.
///
DeclarationNameInfo getDirectiveName() const { return DirName; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCriticalDirectiveClass;
}
};
/// This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
// The AST statement reader uses the private constructors and setter below.
friend class ASTStmtReader;
/// true if current region has inner cancel directive.
bool HasCancel;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
StartLoc, EndLoc, CollapsedNum, NumClauses),
HasCancel(false) {}
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses),
HasCancel(false) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForDirectiveClass;
}
};
/// This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
NumClauses) {}
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
OMPD_parallel_for_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors and setter below.
friend class ASTStmtReader;
/// true if current directive has inner cancel directive.
bool HasCancel;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, StartLoc, EndLoc,
NumClauses, 1), // one child slot: the associated statement
HasCancel(false) {}
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, SourceLocation(),
SourceLocation(), NumClauses, 1),
HasCancel(false) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPParallelSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelSectionsDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
}
};
/// This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors and setter below.
friend class ASTStmtReader;
/// true if this directive has inner cancel directive.
bool HasCancel;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
EndLoc, NumClauses, 1), // one child slot: the associated statement
HasCancel(false) {}
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param NumClauses Number of clauses.
///
explicit OMPTaskDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
SourceLocation(), SourceLocation(), NumClauses,
1),
HasCancel(false) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true, if current directive has inner cancel directive.
///
static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskDirectiveClass;
}
};
/// This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
StartLoc, EndLoc, 0, 0) {} // standalone: no clauses, no associated statement
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
explicit OMPTaskyieldDirective()
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPTaskyieldDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskyieldDirectiveClass;
}
};
/// This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
StartLoc, EndLoc, 0, 0) {} // standalone: no clauses, no associated statement
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
explicit OMPBarrierDirective()
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPBarrierDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPBarrierDirectiveClass;
}
};
/// This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
StartLoc, EndLoc, 0, 0) {} // standalone: no clauses, no associated statement
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
explicit OMPTaskwaitDirective()
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPTaskwaitDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskwaitDirectiveClass;
}
};
/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors and setter below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
StartLoc, EndLoc, NumClauses, 2) {} // two child slots: associated stmt + task_reduction ref
/// Build an empty directive (invalid locations; used via CreateEmpty).
/// \param NumClauses Number of clauses.
///
explicit OMPTaskgroupDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
SourceLocation(), SourceLocation(), NumClauses,
2) {}
/// Sets the task_reduction return variable.
void setReductionRef(Expr *RR) {
// Stored in the second child slot, after the associated statement.
*std::next(child_begin(), 1) = RR;
}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param ReductionRef Reference to the task_reduction return variable.
///
static OMPTaskgroupDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
Expr *ReductionRef);
/// Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Returns reference to the task_reduction return variable.
const Expr *getReductionRef() const {
return static_cast<const Expr *>(*std::next(child_begin(), 1));
}
Expr *getReductionRef() {
return static_cast<Expr *>(*std::next(child_begin(), 1));
}
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskgroupDirectiveClass;
}
};
/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments- variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
StartLoc, EndLoc, NumClauses, 0) {} // standalone: no associated statement
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param NumClauses Number of clauses.
///
explicit OMPFlushDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
SourceLocation(), SourceLocation(), NumClauses,
0) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses (only single OMPFlushClause clause is
/// allowed).
///
static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPFlushDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPFlushDirectiveClass;
}
};
/// This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
// The AST statement reader uses the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
StartLoc, EndLoc, NumClauses, 1) {} // one child slot: the associated statement
/// Build an empty directive (invalid locations; used via CreateEmpty).
///
/// \param NumClauses Number of clauses.
///
explicit OMPOrderedDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPOrderedDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPOrderedDirectiveClass;
}
};
/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// x = x binop expr;
/// x = expr binop x;
/// \endcode
/// This field is true for the first form of the expression and false for the
/// second. Required for correct codegen of non-associative operations (like
/// << or >>).
bool IsXLHSInRHSPart;
/// Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// v = x; <update x>;
/// <update x>; v = x;
/// \endcode
/// This field is true for the first(postfix) form of the expression and false
/// otherwise.
bool IsPostfixUpdate;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
StartLoc, EndLoc, NumClauses, /*NumChildren=*/5),
IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param NumClauses Number of clauses.
///
explicit OMPAtomicDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
SourceLocation(), SourceLocation(), NumClauses,
/*NumChildren=*/5),
IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
// Child-slot layout (per the setters/getters below): slot 1 is 'x',
// slot 2 the helper update expression, slot 3 'v', slot 4 'expr'.
// Slot 0 presumably holds the associated statement taken by \a Create —
// NOTE(review): confirm against the out-of-line Create implementation.
/// Set 'x' part of the associated expression/statement.
void setX(Expr *X) { *std::next(child_begin()) = X; }
/// Set helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
/// Set 'v' part of the associated expression/statement.
void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
/// Set 'expr' part of the associated expression/statement.
void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }
public:
/// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
/// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
/// detailed description of 'x', 'v' and 'expr').
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param X 'x' part of the associated expression/statement.
/// \param V 'v' part of the associated expression/statement.
/// \param E 'expr' part of the associated expression/statement.
/// \param UE Helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
/// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
/// second.
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
static OMPAtomicDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Get 'x' part of the associated expression/statement.
Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
const Expr *getX() const {
return cast_or_null<Expr>(*std::next(child_begin()));
}
/// Get helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
Expr *getUpdateExpr() {
return cast_or_null<Expr>(*std::next(child_begin(), 2));
}
const Expr *getUpdateExpr() const {
return cast_or_null<Expr>(*std::next(child_begin(), 2));
}
/// Return true if helper update expression has form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
/// Return true if 'v' expression must be updated to original value of
/// 'x', false if 'v' must be updated to the new value of 'x'.
bool isPostfixUpdate() const { return IsPostfixUpdate; }
/// Get 'v' part of the associated expression/statement.
Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
const Expr *getV() const {
return cast_or_null<Expr>(*std::next(child_begin(), 3));
}
/// Get 'expr' part of the associated expression/statement.
Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
const Expr *getExpr() const {
return cast_or_null<Expr>(*std::next(child_begin(), 4));
}
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPAtomicDirectiveClass;
}
};
/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
// The single child statement is the associated statement (see \a Create).
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
SourceLocation(), SourceLocation(), NumClauses,
/*NumChildren=*/1) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetDirectiveClass;
}
};
/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
// The single child statement is the associated statement (see \a Create).
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param NumClauses The number of clauses.
///
OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
OMPD_target_data, StartLoc, EndLoc, NumClauses,
/*NumChildren=*/1) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetDataDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
OMPD_target_data, SourceLocation(),
SourceLocation(), NumClauses, /*NumChildren=*/1) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetDataDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param N The number of clauses.
///
static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetDataDirectiveClass;
}
};
/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
// The single child statement is the associated statement (see \a Create).
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param NumClauses The number of clauses.
///
OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
OMPD_target_enter_data, StartLoc, EndLoc,
NumClauses, /*NumChildren=*/1) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetEnterDataDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
OMPD_target_enter_data, SourceLocation(),
SourceLocation(), NumClauses,
/*NumChildren=*/1) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetEnterDataDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param N The number of clauses.
///
static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
unsigned N, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
}
};
/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
// The single child statement is the associated statement (see \a Create).
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param NumClauses The number of clauses.
///
OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
OMPD_target_exit_data, StartLoc, EndLoc,
NumClauses, /*NumChildren=*/1) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetExitDataDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
OMPD_target_exit_data, SourceLocation(),
SourceLocation(), NumClauses,
/*NumChildren=*/1) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetExitDataDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param N The number of clauses.
///
static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
unsigned N, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
}
};
/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
// The single child statement is the associated statement (see \a Create).
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
OMPD_target_parallel, StartLoc, EndLoc,
NumClauses, /*NumChildren=*/1) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetParallelDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
OMPD_target_parallel, SourceLocation(),
SourceLocation(), NumClauses,
/*NumChildren=*/1) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetParallelDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetParallelDirectiveClass;
}
};
/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// true if current region has inner cancel directive.
/// Defaults to false; set via the private \a setHasCancel, which is
/// reachable through the ASTStmtReader friend.
bool HasCancel;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
OMPD_target_parallel_for, StartLoc, EndLoc,
CollapsedNum, NumClauses),
HasCancel(false) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
OMPD_target_parallel_for, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses),
HasCancel(false) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPTargetParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// NOTE: parameter order differs from \a Create — here NumClauses comes
/// before CollapsedNum.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
}
};
/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
// The single child statement is the associated statement (see \a Create).
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param NumClauses Number of clauses.
///
explicit OMPTeamsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
SourceLocation(), SourceLocation(), NumClauses,
/*NumChildren=*/1) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDirectiveClass;
}
};
/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// Kind of the region this cancellation point applies to (e.g. OMPD_for);
/// OMPD_unknown until set.
OpenMPDirectiveKind CancelRegion;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
OMPD_cancellation_point, StartLoc, EndLoc,
/*NumClauses=*/0, /*NumChildren=*/0),
CancelRegion(OMPD_unknown) {}
/// Build an empty directive (used when deserializing the AST).
///
explicit OMPCancellationPointDirective()
: OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
OMPD_cancellation_point, SourceLocation(),
SourceLocation(), /*NumClauses=*/0,
/*NumChildren=*/0),
CancelRegion(OMPD_unknown) {}
/// Set cancel region for current cancellation point.
/// \param CR Cancellation region.
void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPCancellationPointDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
EmptyShell);
/// Get cancellation region for the current cancellation point.
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCancellationPointDirectiveClass;
}
};
/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// Kind of the region being cancelled (e.g. OMPD_for); OMPD_unknown until
/// set.
OpenMPDirectiveKind CancelRegion;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
StartLoc, EndLoc, NumClauses, /*NumChildren=*/0),
CancelRegion(OMPD_unknown) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param NumClauses Number of clauses.
explicit OMPCancelDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
SourceLocation(), SourceLocation(), NumClauses,
/*NumChildren=*/0),
CancelRegion(OMPD_unknown) {}
/// Set cancel region for current cancellation point.
/// \param CR Cancellation region.
void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
///
static OMPCancelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);
/// Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPCancelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Get cancellation region for the current cancellation point.
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCancelDirectiveClass;
}
};
/// This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
StartLoc, EndLoc, CollapsedNum, NumClauses) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTaskLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// NOTE: parameter order differs from \a Create — here NumClauses comes
/// before CollapsedNum.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskLoopDirectiveClass;
}
};
/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum,
NumClauses) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
OMPD_taskloop_simd, SourceLocation(), SourceLocation(),
CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTaskLoopSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// NOTE: parameter order differs from \a Create — here NumClauses comes
/// before CollapsedNum.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
}
};
/// This represents '#pragma omp master taskloop' directive.
///
/// \code
/// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass,
OMPD_master_taskloop, StartLoc, EndLoc, CollapsedNum,
NumClauses) {}
/// Build an empty directive (used when deserializing the AST).
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass,
OMPD_master_taskloop, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPMasterTaskLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// NOTE: parameter order differs from \a Create — here NumClauses comes
/// before CollapsedNum.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
}
};
/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
/// #pragma omp parallel master taskloop private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass,
                         OMPD_parallel_master_taskloop, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum,
                                              unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass,
                         OMPD_parallel_master_taskloop, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);

  /// RTTI support: true iff \p T is an OMPParallelMasterTaskLoopDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         StartLoc, EndLoc, CollapsedNum, NumClauses)
        {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses)
        {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum, EmptyShell);

  /// RTTI support: true iff \p T is an OMPDistributeDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};
/// Represents the '#pragma omp target update' stand-alone directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// Here the directive carries a 'to' clause with argument 'a', a 'from'
/// clause with argument 'b', and a 'device' clause with argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Construct a directive spanning [\p StartLoc, \p EndLoc] with room for
  /// \p NumClauses clauses.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// Construct an empty directive (invalid locations) with room for
  /// \p NumClauses clauses.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetUpdateDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Create a directive carrying \p Clauses with \p AssociatedStmt attached.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetUpdateDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Create an empty directive with storage for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses, EmptyShell);

  /// RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors and
  // setHasCancel() below.
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  // Covered by this default member initializer; the constructors below no
  // longer repeat HasCancel(false) in their mem-initializer lists.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// RTTI support: true iff \p T is an OMPDistributeParallelForDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum,
                                        unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                 unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  /// RTTI support: true iff \p T is an OMPDistributeParallelForSimdDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum,
                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  /// RTTI support: true iff \p T is an OMPDistributeSimdDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// RTTI support: true iff \p T is an OMPTargetParallelForSimdDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};
/// Represents the '#pragma omp target simd' combined directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// Here the directive carries a 'private' clause with the variable 'a', a
/// 'map' clause with the variable 'b', and a 'safelen' clause with the
/// variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Construct a directive spanning [\p StartLoc, \p EndLoc].
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass,
                         OMPD_target_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Construct an empty directive (invalid locations).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Create a directive carrying \p Clauses with \p AssociatedStmt attached.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Create an empty directive with storage for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  /// RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  /// RTTI support: true iff \p T is an OMPTeamsDistributeDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         OMPD_teams_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum,
                                           unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         OMPD_teams_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  /// RTTI support: true iff \p T is an OMPTeamsDistributeSimdDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_teams_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors and
  // setHasCancel() below.
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  // Covered by this default member initializer; the constructors below no
  // longer repeat HasCancel(false) in their mem-initializer lists.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         OMPD_teams_distribute_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum,
                                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         OMPD_teams_distribute_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// RTTI support: true iff \p T is an OMPTeamsDistributeParallelForDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};
/// Represents the '#pragma omp target teams' combined directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// Here the directive carries an 'if' clause with the condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Construct a directive spanning [\p StartLoc, \p EndLoc] with room for
  /// \p NumClauses clauses.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               OMPD_target_teams, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// Construct an empty directive (invalid locations) with room for
  /// \p NumClauses clauses.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               OMPD_target_teams, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Create a directive carrying \p Clauses with \p AssociatedStmt attached.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Create an empty directive with storage for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses, EmptyShell);

  /// RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  // Befriended so the AST reader can use the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
                         OMPD_target_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
                         OMPD_target_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // NOTE(review): parameter order is (NumClauses, CollapsedNum) — the reverse
  // of the constructors above; easy to transpose at call sites.
  static OMPTargetTeamsDistributeDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// RTTI support: true iff \p T is an OMPTargetTeamsDistributeDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// true if the construct has inner cancel directive.
  /// The in-class initializer covers both constructors; the redundant
  /// HasCancel(false) mem-initializers were removed.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum,
                                               unsigned NumClauses)
      : OMPLoopDirective(this,
                         OMPTargetTeamsDistributeParallelForDirectiveClass,
                         OMPD_target_teams_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum,
                                                        unsigned NumClauses)
      : OMPLoopDirective(
            this, OMPTargetTeamsDistributeParallelForDirectiveClass,
            OMPD_target_teams_distribute_parallel_for, SourceLocation(),
            SourceLocation(), CollapsedNum, NumClauses) {}
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
: public OMPLoopDirective {
// The AST reader deserializes through the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this,
OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
OMPD_target_teams_distribute_parallel_for_simd,
StartLoc, EndLoc, CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPTargetTeamsDistributeParallelForSimdDirective(
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(
this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetTeamsDistributeParallelForSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
// LLVM-style RTTI support.
static bool classof(const Stmt *T) {
return T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
// The AST reader deserializes through the private constructors below.
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass,
OMPD_target_teams_distribute_simd, StartLoc, EndLoc,
CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass,
OMPD_target_teams_distribute_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetTeamsDistributeSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
// LLVM-style RTTI support.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};
} // end namespace clang
#endif
|
batchnorm_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haitao@openailab.com
*/
#include "batchnorm_kernel_arm.h"
#include <arm_neon.h>
/*
 * Apply the folded batch-norm transform out = in * scale_var + scale_mean
 * for `step` consecutive channels, NEON-vectorized 4 floats at a time.
 *
 * i           - worker index from the task framework (unused here)
 * id          - worker id; selects which slice of channels this call handles
 * data        - points to an int holding `step`, the channel count per worker
 * input       - source feature map (channel-major, channel_size floats each)
 * output      - destination buffer, same layout as input
 * scale_mean  - per-channel folded bias (mean term)
 * scale_var   - per-channel folded scale (inverse-variance term)
 * channel_size- elements per channel
 * num_thread  - OpenMP thread count for the channel loop
 */
static void batchnorm_kernel(int i, int id, void* data, const float* input, float* output, float* scale_mean,
                             float* scale_var, int channel_size, int num_thread)
{
    (void)i; /* unused worker index kept for the task-callback signature */
    int step = ((int*)data)[0];

#pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < step; c++)
    {
        int cur_c = id * step + c; /* absolute channel index */
        float s_mean = scale_mean[cur_c];
        float s_var = scale_var[cur_c];
        float32x4_t _mean = vdupq_n_f32(s_mean);
        float32x4_t _var = vdupq_n_f32(s_var);

        int offset = cur_c * channel_size;
        const float* input_ptr = input + offset;
        float* output_ptr = output + offset;

        /* Vector body over the largest multiple of 4; `& ~3` used for both
           loops so the bound expression matches the scalar tail below
           (the original mixed `& -4` and `& ~3`, which are the same value). */
        for (int l = 0; l < (channel_size & ~3); l += 4)
        {
            float32x4_t _input = vld1q_f32(input_ptr);
            /* vmlaq: _mean + _input * _var, i.e. in * var + mean per lane. */
            vst1q_f32(output_ptr, vmlaq_f32(_mean, _input, _var));
            input_ptr += 4;
            output_ptr += 4;
        }
        /* Scalar tail for the remaining channel_size % 4 elements. */
        for (int l = channel_size & ~3; l < channel_size; l++)
        {
            *output_ptr = (*input_ptr) * s_var + s_mean;
            input_ptr++;
            output_ptr++;
        }
    }
}
/*
 * Run folded batch normalization over every image in the batch.
 * Supports 2D/3D/4D input tensors; returns 0 on success, -1 on an
 * unsupported tensor rank.
 */
int batchnorm_run(struct tensor* output_tensor, struct tensor* input_tensor, float* scale_mean,
                  float* scale_var_inv, int num_thread)
{
    int batch_number = input_tensor->dims[0];
    int channel_num = input_tensor->dims[1];
    int channel_size = 0;

    /* Derive the per-channel element count from the tensor rank. */
    switch (input_tensor->dim_num)
    {
        case 4:
            channel_size = input_tensor->dims[2] * input_tensor->dims[3];
            break;
        case 3:
            channel_size = input_tensor->dims[2];
            break;
        case 2:
            channel_size = 1;
            break;
        default:
            return -1; /* unsupported rank */
    }

    int img_size = channel_num * channel_size;
    const float* input = (const float*)input_tensor->data;
    float* output = (float*)output_tensor->data;

    /* Only mean and inverse variance are used; kernel handles all channels
       of one image per call (id 0, step == channel_num). */
    for (int n = 0; n < batch_number; n++)
    {
        const float* batch_input = input + n * img_size;
        float* batch_output = output + n * img_size;
        batchnorm_kernel(0, 0, &channel_num, batch_input, batch_output, scale_mean, scale_var_inv,
                         channel_size, num_thread);
    }

    return 0;
}
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class is used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned an unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
/* Extent of one peak region in a single color component's histogram. */
typedef struct _ExtentPacket
{
double
center;    /* running sum, later mean, of member pixel values */
ssize_t
index,     /* scan cursor into the extrema array (see DefineRegion) */
left,      /* leftmost histogram bin of the region */
right;     /* rightmost histogram bin of the region */
} ExtentPacket;
/* One candidate color class: the RGB extents that define it plus the
   number of pixels assigned to it.  Linked into a singly-linked list. */
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,     /* pixels falling inside this cluster's extents */
id;        /* colormap index once surviving clusters are numbered */
} Cluster;
/* Node of the interval tree built over zero crossings across scales. */
typedef struct _IntervalTree
{
double
tau;       /* scale (Gaussian sigma) at which this interval exists */
ssize_t
left,
right;     /* interval bounds on the 0..255 histogram axis */
double
mean_stability,
stability; /* persistence measures across scales */
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
/* Smoothed histogram and the signs of its second derivative at one tau. */
typedef struct _ZeroCrossing
{
double
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,      /* indices into the extrema/histogram arrays per component */
Green = 1,
Red = 0,
SafeMargin = 3,    /* slack added around cluster extents when matching */
TreeLength = 600;  /* maximum number of interval-tree nodes */
/*
Method prototypes.
*/
static double
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
FreeNodes(IntervalTree *),
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const double,double *),
ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,
% const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const double cluster_threshold,
const double weighting_exponent,const MagickBooleanType verbose,
ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
double
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register double
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters: one candidate cluster for every combination of peak
regions found in the red, green and blue histogram extrema.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) ResetMagickMemory(&red,0,sizeof(red));
(void) ResetMagickMemory(&green,0,sizeof(green));
(void) ResetMagickMemory(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster: a pixel belongs to the first cluster
whose RGB extents (padded by SafeMargin) contain it.  The running center
sums are later divided by count to give the mean color.
*/
status=MagickTrue;
count=0;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(double) ScaleQuantumToChar(
GetPixelRed(image,p));
cluster->green.center+=(double) ScaleQuantumToChar(
GetPixelGreen(image,p));
cluster->blue.center+=(double) ScaleQuantumToChar(
GetPixelBlue(image,p));
cluster->count++;
break;
}
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,2*
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
NOTE(review): count is reset here and then used BOTH as the surviving-
cluster id counter and as the base of the threshold test below, so the
threshold scales with clusters kept so far rather than total pixels --
confirm this is the intended behavior.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster: convert the center sums into mean colors and
assign the next colormap id.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations: a lookup table of i*i for i in
[-255,255].  The pointer is biased by 255 so it can be indexed
directly with (char value) differences, which may be negative.
*/
squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(double) i*(double) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do coarse-grain classes: threshold each pixel against the cluster
extents; pixels matching no cluster fall through to the fuzzy
c-means assignment below.
*/
image_view=AcquireAuthenticCacheView(image,exception)
;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*clust;
register const PixelInfo
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,0,q);
for (clust=head; clust != (Cluster *) NULL; clust=clust->next)
{
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
(clust->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
(clust->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
(clust->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
(clust->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
(clust->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
(clust->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(image,(Quantum) clust->id,q);
break;
}
}
if (clust == (Cluster *) NULL)
{
double
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership: assign the pixel to the class whose
membership value 1/sum is largest (Lim & Lee, eq. for fuzzy
c-means with exponent weighting_exponent).
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(image,(Quantum) j,q);
}
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image,exception);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(double *) RelinquishMagickMemory(free_squares);
/*
NOTE(review): status accumulates failures from the loops above and from
SyncImage, yet the function unconditionally returns MagickTrue --
confirm whether the return should be based on status instead.
*/
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
const size_t number_crossings)
{
register ssize_t
i,
j,
k,
l;
ssize_t
center,
correct,
count,
left,
right;
/*
Consolidate zero crossings: walk from the coarsest scale toward the
finest, moving each crossing at scale i to the nearest bin that keeps
an even number of scale-(i+1) crossings between neighboring crossings.
NOTE(review): zero_crossing[i+1] is read for i up to number_crossings-1,
so the array must hold at least number_crossings+1 entries -- confirm
against the caller (OptimalTau).
*/
for (i=(ssize_t) number_crossings-1; i >= 0; i--)
for (j=0; j <= 255; j++)
{
if (zero_crossing[i].crossings[j] == 0)
continue;
/*
Find the entry that is closest to j and still preserves the
property that there are an even number of crossings between
intervals.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i+1].crossings[k] != 0)
break;
left=MagickMax(k,0);
center=j;
for (k=j+1; k < 255; k++)
if (zero_crossing[i+1].crossings[k] != 0)
break;
right=MagickMin(k,255);
/*
K is the zero crossing just left of j.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i].crossings[k] != 0)
break;
/* NOTE(review): dead check -- the loop above cannot leave k < 0
since it stops at k == 0. */
if (k < 0)
k=0;
/*
Check center for an even number of crossings between k and j.
*/
correct=(-1);
if (zero_crossing[i+1].crossings[j] != 0)
{
count=0;
for (l=k+1; l < center; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (center != k))
correct=center;
}
/*
Check left for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < left; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (left != k))
correct=left;
}
/*
Check right for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < right; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (right != k))
correct=right;
}
/*
Move the crossing to the chosen position, or drop it when no
position preserves the even-count property (correct == -1).
*/
l=(ssize_t) zero_crossing[i].crossings[j];
zero_crossing[i].crossings[j]=0;
if (correct != -1)
zero_crossing[i].crossings[correct]=(short) l;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o extents: This pointer to an ExtentPacket represent the extends
% of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Reset the extent to cover the whole intensity range by default.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Resume scanning at extents->index and advance to the next maximum
    (positive extrema entry): this is the region's left edge.
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Advance to the next minimum (negative extrema entry); the right edge is
    the position just before it.
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  register ssize_t
    i;

  /*
    Interior points: first-order central difference, (h[i+1]-h[i-1])/2.
  */
  for (i=1; i < 255; i++)
    derivative[i]=0.5*(histogram[i+1]-histogram[i-1]);
  /*
    Endpoints: one-sided second-order (three-point) differences.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[255]=(0.5*histogram[253]-2.0*histogram[254]+1.5*histogram[255]);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* FIX: was sizeof(**histogram) (ssize_t) -- allocate short elements. */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          FIX: start the unwind at the current index too; one of the pair may
          have been allocated (RelinquishMagickMemory() is NULL-safe).
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,exception);
  /* A smooth_threshold of 0 is treated as 1.0 (default smoothing). */
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one per (red,green,blue) extent triple.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              FIX: release the partially built cluster list and the histogram
              workspace; this path previously leaked both.
            */
            for (cluster=head; cluster != (Cluster *) NULL; )
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
              cluster=next_cluster;
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            FIX: release the histogram workspace; this path previously leaked.
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel belongs to the first cluster
    whose RGB extents (padded by SafeMargin) contain it.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold; count is
    reused as the surviving-cluster id counter.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert accumulated sums into mean centers.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    The object is the smallest surviving cluster, the background the largest.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The dynamic threshold is the midpoint between the background and
        object cluster centers, per channel.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const Quantum
    *q;

  register ssize_t
    bin,
    x;

  ssize_t
    y;

  /*
    Clear all 256 bins of each color channel's histogram.
  */
  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  /*
    Tally one count per channel for every pixel in the image.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    q=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;  /* pixel cache failure: keep whatever was tallied so far */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,q))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))]++;
      q+=GetPixelChannels(image);
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Append every leaf (childless) node of the subtree to list.  Traversal
    order matches the original recursion (node, then siblings, then
    children); the tail recursion on the child is expressed as a loop.
  */
  for ( ; node != (IntervalTree *) NULL; node=node->child)
  {
    if (node->child == (IntervalTree *) NULL)
      {
        list[*number_nodes]=node;
        (*number_nodes)++;
      }
    InitializeList(list,number_nodes,node->sibling);
  }
}
static void MeanStability(IntervalTree *node)
{
  /*
    Set each node's mean_stability to the average stability of its direct
    children (0.0 for leaves).  The tail recursion on the child is expressed
    as a loop; siblings are still handled recursively.
  */
  for ( ; node != (IntervalTree *) NULL; node=node->child)
  {
    register IntervalTree
      *link;

    node->mean_stability=0.0;
    link=node->child;
    if (link != (IntervalTree *) NULL)
      {
        double
          total;

        ssize_t
          children;

        total=0.0;
        children=0;
        for ( ; link != (IntervalTree *) NULL; link=link->sibling)
        {
          total+=link->stability;
          children++;
        }
        node->mean_stability=total/(double) children;
      }
    MeanStability(node->sibling);
  }
}
static void Stability(IntervalTree *node)
{
  /*
    A node's stability is the tau gap to its first child; leaves get 0.0.
    The tail recursion on the child is expressed as a loop.
  */
  for ( ; node != (IntervalTree *) NULL; node=node->child)
  {
    if (node->child == (IntervalTree *) NULL)
      node->stability=0.0;
    else
      node->stability=node->tau-(node->child)->tau;
    Stability(node->sibling);
  }
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /* i starts at -1 so the first pass (i+1 == 0) splits the root against the
     coarsest scale's crossings.  NOTE(review): this indexing assumes
     zero_crossing holds number_crossings+1 entries -- confirm against
     OptimalTau, which appears to allocate exactly that. */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      /* Each zero crossing strictly inside the leaf's interval spawns a new
         child covering [left, k]; subsequent splits become siblings. */
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                /* Allocation failure: free everything built so far. */
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /* If any split happened, append a final sibling covering the remainder
         [left, head->right]. */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Collect nodes whose stability is at least the mean stability of their
    children.  An active node is recorded and its own children are not
    searched; an inactive node is skipped but both its siblings and children
    are searched.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
      ActiveNodes(list,number_nodes,node->sibling);
      return;
    }
  ActiveNodes(list,number_nodes,node->sibling);
  ActiveNodes(list,number_nodes,node->child);
}
static void FreeNodes(IntervalTree *node)
{
  /*
    Release an interval (sub)tree: every node's child subtree is freed before
    the node itself; the sibling chain is walked iteratively.
  */
  while (node != (IntervalTree *) NULL)
  {
    IntervalTree
      *next;

    next=node->sibling;
    FreeNodes(node->child);
    (void) RelinquishMagickMemory(node);
    node=next;
  }
}
static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one slot per tau step plus a final slot for
    the raw histogram (tau == 0).
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list.  For each scale tau (coarse to fine):
    smooth the histogram, take two derivatives, and record where the second
    derivative crosses zero.
  */
  derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(double *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  /* zero_crossing now holds number_crossings+1 usable entries (0..i). */
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *) RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval: bins 0 and 255 get the
    opposite sign of the first/last interior crossing.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: stability is greater (or equal) to the mean stability of
    its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak.  A -1 crossing at the right edge marks a
      peak interval (search for a maximum); otherwise it is a valley (search
      for a minimum).
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /* Stamp the whole interval with the signed extremum position: positive
       for peaks, negative for valleys; position 0 is encoded as 256 so the
       sign survives. */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau/=(double) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    exponent,
    *kernel,
    norm;

  register ssize_t
    u,
    x;

  /*
    Build a Gaussian kernel indexed by |x-u| for the given tau; entries
    smaller than MagickEpsilon (and everything beyond) stay zero.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  norm=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  exponent=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (x=0; x <= 255; x++)
    kernel[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    kernel[x]=exp((double) exponent*x*x);
    if (kernel[x] < MagickEpsilon)
      break;
  }
  /*
    Convolve the histogram with the kernel to smooth it at scale tau.
  */
  for (x=0; x <= 255; x++)
  {
    double
      sum;

    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(double) histogram[u]*kernel[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=norm*sum;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the channels allocated so far before bailing out. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.  Work in the requested colorspace; the image is
    restored to its previous colorspace before returning.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  /* Locate histogram extrema per channel; smooth_threshold of 0 is treated
     as 1.0. */
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    sign;

  /*
    Merge low numbers to zero to help prevent noise: magnitudes inside
    [-smooth_threshold, smooth_threshold) are clamped to 0 in place.
  */
  for (i=0; i <= 255; i++)
  {
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  }
  /*
    Mark zero crossings: sign tracks the polarity of the last nonzero value
    seen (1 after negative, -1 after positive); zeros leave it untouched.
  */
  sign=0;
  for (i=0; i <= 255; i++)
  {
    double
      value;

    value=second_derivative[i];
    crossings[i]=0;
    if (value < 0.0)
      {
        if (sign > 0)
          crossings[i]=(-1);
        sign=1;
      }
    else
      if (value > 0.0)
        {
          if (sign < 0)
            crossings[i]=1;
          sign=(-1);
        }
  }
}
|
yinyang.c | #include "yinyang.h"
#include "kmeans_utils.h"
#include "../../utils/matrix/csr_matrix/csr_to_vector_list.h"
#include "../../utils/matrix/vector_list/vector_list_math.h"
#include "../../utils/matrix/csr_matrix/csr_math.h"
#include "../../utils/vector/common/common_vector_math.h"
#include "../../utils/vector/sparse/sparse_vector_math.h"
#include "../../utils/fcl_logging.h"
#include <math.h>
#include <unistd.h>
#include <float.h>
struct kmeans_result* yinyang_kmeans(struct csr_matrix* samples, struct kmeans_params *prms) {
uint32_t i;
uint64_t j;
uint64_t block_vectors_dim;
uint64_t no_groups;
uint32_t disable_optimizations;
uint64_t keys_per_block;
VALUE_TYPE desired_bv_annz; /* desired size of the block vectors */
uint64_t *cluster_to_group;
struct csr_matrix block_vectors_samples;
struct sparse_vector* block_vectors_clusters; /* block vector matrix of clusters */
struct general_kmeans_context ctx;
struct kmeans_result* res;
VALUE_TYPE *distance_clustersold_to_clustersnew;
struct group* groups;
VALUE_TYPE *group_max_drift;
VALUE_TYPE **lower_bounds;
disable_optimizations = prms->kmeans_algorithm_id == ALGORITHM_YINYANG;
initialize_general_context(prms, &ctx, samples);
desired_bv_annz = d_get_subfloat_default(&(prms->tr)
, "additional_params", "bv_annz", 0.3);
block_vectors_dim = 0;
keys_per_block = 0;
if (!disable_optimizations) {
initialize_csr_matrix_zero(&block_vectors_samples);
if (prms->kmeans_algorithm_id == ALGORITHM_BV_YINYANG) {
/* search for a suitable size of the block vectors for the input samples and create them */
search_samples_block_vectors(prms, ctx.samples, desired_bv_annz
, &block_vectors_samples
, &block_vectors_dim);
}
if (prms->kmeans_algorithm_id == ALGORITHM_BV_YINYANG_ONDEMAND) {
block_vectors_dim = search_block_vector_size(ctx.samples, desired_bv_annz, prms->verbose);
keys_per_block = ctx.samples->dim / block_vectors_dim;
if (ctx.samples->dim % block_vectors_dim > 0) keys_per_block++;
}
/* create block vectors for the clusters */
create_block_vectors_list_from_vector_list(ctx.cluster_vectors
, block_vectors_dim
, ctx.no_clusters
, ctx.samples->dim
, &block_vectors_clusters);
}
distance_clustersold_to_clustersnew = (VALUE_TYPE*) calloc(ctx.no_clusters, sizeof(VALUE_TYPE));
/* no_groups is set to no_clusters / 10 as suggested in the yinyang paper */
no_groups = ctx.no_clusters / 10;
if (no_groups == 0) no_groups = 1;
/* create yinyang cluster groups by doing 5 k-means iterations on the clusters */
create_kmeans_cluster_groups(ctx.cluster_vectors
, ctx.no_clusters
, ctx.samples->sample_count
, &groups, &no_groups);
group_max_drift = (VALUE_TYPE*) calloc(no_groups, sizeof(VALUE_TYPE));
lower_bounds = (VALUE_TYPE**) calloc(ctx.samples->sample_count, sizeof(VALUE_TYPE*));
for (i = 0; i < ctx.samples->sample_count; i++) {
lower_bounds[i] = (VALUE_TYPE*) calloc(no_groups, sizeof(VALUE_TYPE));
}
cluster_to_group = (uint64_t*) calloc(ctx.no_clusters, sizeof(uint64_t));
for (i = 0; i < no_groups; i++) {
for (j = 0; j < groups[i].no_clusters; j++) {
cluster_to_group[groups[i].clusters[j]] = i;
}
}
for (i = 0; i < prms->iteration_limit && !ctx.converged && !prms->stop; i++) {
uint64_t saved_calculations_prev_cluster, saved_calculations_bv;
uint64_t saved_calculations_global, saved_calculations_local;
uint64_t done_blockvector_calcs;
uint64_t groups_not_skipped;
done_blockvector_calcs = 0;
saved_calculations_bv = 0;
saved_calculations_global = 0;
saved_calculations_local = 0;
saved_calculations_prev_cluster = 0;
groups_not_skipped = 0;
/* initialize data needed for the iteration */
pre_process_iteration(&ctx);
if (i == 0) {
/* first iteration is done with regular kmeans to find the upper and lower bounds */
uint64_t sample_id, l;
/* do one regular kmeans step to initialize bounds */
#pragma omp parallel for schedule(dynamic, 1000) private(l)
for (sample_id = 0; sample_id < ctx.samples->sample_count; sample_id++) {
uint64_t cluster_id;
VALUE_TYPE dist;
uint32_t is_first_assignment;
struct sparse_vector bv;
bv.nnz = 0;
bv.keys = NULL;
bv.values = NULL;
is_first_assignment = 0;
if (omp_get_thread_num() == 0) check_signals(&(prms->stop));
if (!prms->stop) {
for (l = 0; l < no_groups; l++) {
lower_bounds[sample_id][l] = DBL_MAX;
}
for (cluster_id = 0; cluster_id < ctx.no_clusters; cluster_id++) {
if (!disable_optimizations) {
/* block vector optimizations */
/* check if sqrt( ||s||² + ||c||² - 2*< s_B, c_B > ) >= ctx.cluster_distances[sample_id] */
if (prms->kmeans_algorithm_id == ALGORITHM_BV_YINYANG) {
/* evaluate block vector approximation. */
dist = euclid_vector_list(&block_vectors_samples, sample_id
, block_vectors_clusters, cluster_id
, ctx.vector_lengths_samples
, ctx.vector_lengths_clusters);
} else {
/* kmeans_algorithm_id == ALGORITHM_BV_YINYANG_ONDEMAND */
if (bv.keys == NULL) {
create_block_vector_from_csr_matrix_vector(ctx.samples
, sample_id
, keys_per_block
, &bv);
}
dist = euclid_vector(bv.keys, bv.values, bv.nnz
, block_vectors_clusters[cluster_id].keys
, block_vectors_clusters[cluster_id].values
, block_vectors_clusters[cluster_id].nnz
, ctx.vector_lengths_samples[sample_id]
, ctx.vector_lengths_clusters[cluster_id]);
}
done_blockvector_calcs += 1;
/* we do this fabs to not run into numeric errors */
if (dist >= ctx.cluster_distances[sample_id] && fabs(dist - ctx.cluster_distances[sample_id]) >= 1e-6) {
saved_calculations_bv += 1;
goto end_cluster_init;
}
}
dist = euclid_vector_list(samples, sample_id, ctx.cluster_vectors, cluster_id
, ctx.vector_lengths_samples, ctx.vector_lengths_clusters);
/*#pragma omp critical*/
ctx.done_calculations += 1;
if (dist < ctx.cluster_distances[sample_id]) {
if (is_first_assignment) {
is_first_assignment = 0;
} else {
lower_bounds[sample_id][cluster_to_group[ctx.cluster_assignments[sample_id]]] = ctx.cluster_distances[sample_id];
}
ctx.cluster_distances[sample_id] = dist;
ctx.cluster_assignments[sample_id] = cluster_id;
} else {
end_cluster_init:;
if (dist < lower_bounds[sample_id][cluster_to_group[cluster_id]]) {
lower_bounds[sample_id][cluster_to_group[cluster_id]] = dist;
}
}
}
}
if (!disable_optimizations) {
free_null(bv.keys);
free_null(bv.values);
}
}
} else {
#pragma omp parallel for schedule(dynamic, 1000)
for (j = 0; j < ctx.samples->sample_count; j++) {
VALUE_TYPE dist;
uint64_t cluster_id, sample_id, l;
VALUE_TYPE *temp_lower_bounds;
VALUE_TYPE global_lower_bound;
VALUE_TYPE *should_group_be_updated;
struct sparse_vector bv;
bv.nnz = 0;
bv.keys = NULL;
bv.values = NULL;
sample_id = j;
if (omp_get_thread_num() == 0) check_signals(&(prms->stop));
if (!prms->stop) {
/* update upper bound of this sample with drift of assigned cluster */
ctx.cluster_distances[sample_id] = ctx.cluster_distances[sample_id] + distance_clustersold_to_clustersnew[ctx.cluster_assignments[sample_id]];
temp_lower_bounds = (VALUE_TYPE*) calloc(no_groups, sizeof(VALUE_TYPE));
should_group_be_updated = (VALUE_TYPE*) calloc(no_groups, sizeof(VALUE_TYPE));
global_lower_bound = DBL_MAX;
for (l = 0; l < no_groups; l++) {
temp_lower_bounds[l] = lower_bounds[sample_id][l];
lower_bounds[sample_id][l] = lower_bounds[sample_id][l] - group_max_drift[l];
if (global_lower_bound > lower_bounds[sample_id][l]) global_lower_bound = lower_bounds[sample_id][l];
}
/* check if the global lower bound is already bigger than the current upper bound */
if (global_lower_bound >= ctx.cluster_distances[sample_id]) {
saved_calculations_global += ctx.no_clusters;
goto end;
}
/* tighten the upper bound by calculating the actual distance to the current closest cluster */
ctx.cluster_distances[sample_id]
= euclid_vector_list(samples, sample_id, ctx.cluster_vectors, ctx.cluster_assignments[sample_id]
, ctx.vector_lengths_samples, ctx.vector_lengths_clusters);
/*#pragma omp critical*/
ctx.done_calculations += 1;
/* recheck if the global lower bound is now bigger than the current upper bound */
if (global_lower_bound >= ctx.cluster_distances[sample_id]) {
saved_calculations_global += ctx.no_clusters - 1;
goto end;
}
for (l = 0; l < no_groups; l++) {
if (lower_bounds[sample_id][l] < ctx.cluster_distances[sample_id]) {
should_group_be_updated[l] = 1;
groups_not_skipped += 1;
lower_bounds[sample_id][l] = DBL_MAX;
}
}
for (cluster_id = 0; cluster_id < ctx.no_clusters; cluster_id++) {
if (!should_group_be_updated[cluster_to_group[cluster_id]]) {
saved_calculations_prev_cluster++;
continue;
}
if (ctx.cluster_counts[cluster_id] == 0 || cluster_id == ctx.previous_cluster_assignments[sample_id]) continue;
if (lower_bounds[sample_id][cluster_to_group[cluster_id]] < temp_lower_bounds[cluster_to_group[cluster_id]] - distance_clustersold_to_clustersnew[cluster_id]) {
dist = lower_bounds[sample_id][cluster_to_group[cluster_id]];
saved_calculations_local += 1;
goto end_cluster;
}
if (!disable_optimizations) {
if (i < 15) {
/* block vector optimizations */
/* check if sqrt( ||s||² + ||c||² - 2*< s_B, c_B > ) >= ctx.cluster_distances[sample_id] */
if (prms->kmeans_algorithm_id == ALGORITHM_BV_YINYANG) {
/* evaluate block vector approximation. */
dist = euclid_vector_list(&block_vectors_samples, sample_id
, block_vectors_clusters, cluster_id
, ctx.vector_lengths_samples
, ctx.vector_lengths_clusters);
} else {
/* kmeans_algorithm_id == ALGORITHM_BV_YINYANG_ONDEMAND */
if (bv.keys == NULL) {
create_block_vector_from_csr_matrix_vector(ctx.samples
, sample_id
, keys_per_block
, &bv);
}
dist = euclid_vector(bv.keys, bv.values, bv.nnz
, block_vectors_clusters[cluster_id].keys
, block_vectors_clusters[cluster_id].values
, block_vectors_clusters[cluster_id].nnz
, ctx.vector_lengths_samples[sample_id]
, ctx.vector_lengths_clusters[cluster_id]);
}
done_blockvector_calcs += 1;
if (dist >= ctx.cluster_distances[sample_id] && fabs(dist - ctx.cluster_distances[sample_id]) >= 1e-6) {
saved_calculations_bv += 1;
goto end_cluster;
}
}
}
dist = euclid_vector_list(samples, sample_id, ctx.cluster_vectors, cluster_id
, ctx.vector_lengths_samples, ctx.vector_lengths_clusters);
/*#pragma omp critical*/
ctx.done_calculations += 1;
if (dist < ctx.cluster_distances[sample_id]) {
lower_bounds[sample_id][cluster_to_group[ctx.cluster_assignments[sample_id]]] = ctx.cluster_distances[sample_id];
ctx.cluster_distances[sample_id] = dist;
ctx.cluster_assignments[sample_id] = cluster_id;
} else {
end_cluster:;
if (dist < lower_bounds[sample_id][cluster_to_group[cluster_id]]) {
lower_bounds[sample_id][cluster_to_group[cluster_id]] = dist;
}
}
}
end:;
free(should_group_be_updated);
free(temp_lower_bounds);
}
if (!disable_optimizations) {
free_null(bv.keys);
free_null(bv.values);
}
} /* block iterate over samples */
} /* block is first iteration */
post_process_iteration(&ctx, prms);
/* shift clusters to new position */
calculate_shifted_clusters(&ctx);
/* calculate distance between a cluster before and after the shift */
calculate_distance_clustersold_to_clustersnew(distance_clustersold_to_clustersnew
, ctx.shifted_cluster_vectors
, ctx.cluster_vectors
, ctx.no_clusters
, ctx.vector_lengths_shifted_clusters
, ctx.vector_lengths_clusters
, ctx.clusters_not_changed);
switch_to_shifted_clusters(&ctx);
/* ------------ calculate maximum drift for every group ------------- */
{
uint64_t *clusters;
uint64_t n_clusters, l, k;
VALUE_TYPE drift;
for (l = 0; l < no_groups; l++) {
clusters = groups[l].clusters;
n_clusters = groups[l].no_clusters;
group_max_drift[l] = 0;
for (k = 0; k < n_clusters; k++) {
drift = distance_clustersold_to_clustersnew[clusters[k]];
if (group_max_drift[l] < drift) group_max_drift[l] = drift;
}
}
}
if (!disable_optimizations) {
/* update only block vectors for cluster that shifted */
update_changed_blockvectors(ctx.cluster_vectors
, block_vectors_dim
, ctx.no_clusters
, ctx.samples->dim
, ctx.clusters_not_changed
, block_vectors_clusters);
d_add_ilist(&(prms->tr), "iteration_bv_calcs", done_blockvector_calcs);
d_add_ilist(&(prms->tr), "iteration_bv_calcs_success", saved_calculations_bv);
}
print_iteration_summary(&ctx, prms, i);
/* print block vector and yinyang statistics */
if (prms->verbose) LOG_INFO("statistics [BV] b:%" PRINTF_INT64_MODIFIER "u/db:%" PRINTF_INT64_MODIFIER "u [YY] grp_not_skip=%" PRINTF_INT64_MODIFIER "u/pc:%" PRINTF_INT64_MODIFIER "u/g=%" PRINTF_INT64_MODIFIER "u/l=%" PRINTF_INT64_MODIFIER "u"
, saved_calculations_bv
, done_blockvector_calcs
, groups_not_skipped
, saved_calculations_prev_cluster
, saved_calculations_global
, saved_calculations_local);
}
if (prms->verbose) LOG_INFO("total total_no_calcs = %" PRINTF_INT64_MODIFIER "u", ctx.total_no_calcs);
res = create_kmeans_result(prms, &ctx);
/* cleanup all */
if (!disable_optimizations) {
free_csr_matrix(&block_vectors_samples);
free_vector_list(block_vectors_clusters, ctx.no_clusters);
free(block_vectors_clusters);
}
free_general_context(&ctx, prms);
free_null(distance_clustersold_to_clustersnew);
free_null(group_max_drift);
for (i = 0; i < ctx.samples->sample_count; i++) {
free_null(lower_bounds[i]);
}
for (i = 0; i < no_groups; i++) {
free_null(groups[i].clusters);
}
free_null(groups);
free_null(lower_bounds);
free_null(cluster_to_group);
return res;
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
  unsigned &Depth;
  unsigned LevelsAdded;

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : Depth(Depth), LevelsAdded(0) {}

  /// On destruction, retract every level this object added.
  ~TemplateParameterDepthRAII() { Depth -= LevelsAdded; }

  /// Enter one additional template parameter scope.
  void operator++() {
    Depth += 1;
    LevelsAdded += 1;
  }

  /// Enter \p D additional template parameter scopes at once.
  void addDepth(unsigned D) {
    Depth += D;
    LevelsAdded += D;
  }

  /// Replace whatever levels were added so far with exactly \p D levels.
  void setAddedDepth(unsigned D) {
    Depth = Depth - LevelsAdded + D;
    LevelsAdded = D;
  }

  /// Current depth, including the levels added through this object.
  unsigned getDepth() const { return Depth; }
  /// Depth as it was before this object added any levels.
  unsigned getOriginalDepth() const { return Depth - LevelsAdded; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,

    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token
    NoSpaceBeforeLess = 0x1,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };

  /// One candidate '<' location, together with the paren/bracket/brace
  /// nesting counts at the moment it was recorded.
  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    unsigned short ParenCount, BracketCount, BraceCount;

    /// True if the parser is currently at the exact nesting depth at which
    /// this candidate was recorded.
    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }

    /// True if the parser is at this candidate's depth, or nested more
    /// deeply inside it.
    bool isActiveOrNested(Parser &P) const {
      return isActive(P) || P.ParenCount > ParenCount ||
             P.BracketCount > BracketCount || P.BraceCount > BraceCount;
    }
  };

  SmallVector<Loc, 8> Locs;

  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (!Locs.empty() && Locs.back().isActive(P)) {
      // Same nesting depth: keep the higher-priority candidate, preferring
      // the newer one on ties (<=).
      if (Locs.back().Priority <= Prio) {
        Locs.back().TemplateName = TemplateName;
        Locs.back().LessLoc = LessLoc;
        Locs.back().Priority = Prio;
      }
    } else {
      Locs.push_back({TemplateName, LessLoc, Prio,
                      P.ParenCount, P.BracketCount, P.BraceCount});
    }
  }

  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }

  /// Get the current enclosing expression that might have been intended to be
  /// a template name.
  Loc *getCurrent(Parser &P) {
    if (!Locs.empty() && Locs.back().isActive(P))
      return &Locs.back();
    return nullptr;
  }
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
/// Returns the language options in effect, as held by the preprocessor.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
/// Returns information about the compilation target, via the preprocessor.
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
/// Returns the preprocessor this parser reads tokens from.
Preprocessor &getPreprocessor() const { return PP; }
/// Returns the Sema instance that receives parsing callbacks.
Sema &getActions() const { return Actions; }
/// Returns the factory used to allocate ParsedAttr objects.
AttributeFactory &getAttrFactory() { return AttrFactory; }
/// Returns the current lookahead token without consuming it.
const Token &getCurToken() const { return Tok; }
/// Returns the current scope; scope state is owned by Sema, not the parser.
Scope *getCurScope() const { return Actions.getCurScope(); }
/// Forwards to Sema to bump the Microsoft-ABI mangling number.
void incrementMSManglingNumber() const {
  return Actions.incrementMSManglingNumber();
}
/// Returns the innermost Objective-C declaration context, via Sema.
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
/// Convenience overload that parses one top-level declaration and discards
/// the resulting declaration group; returns true at EOF.
bool ParseTopLevelDecl() {
  DeclGroupPtrTy Result;
  return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Record the location before Lex overwrites Tok with the next token.
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current token only if it has the \p Expected kind.
/// Returns true (and advances) on a match, false (no state change) otherwise.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (Tok.isNot(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Record the location before Lex overwrites Tok with the next token.
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}
/// Consume the current token only if it has the \p Expected kind; on
/// success also report the consumed token's location through \p Loc.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (TryConsumeToken(Expected)) {
    Loc = PrevTokLocation;
    return true;
  }
  return false;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
/// The checks are mutually exclusive; each special kind gets its dedicated
/// consumer so side counts (paren/bracket/brace) stay accurate.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}
/// Returns the source location just past the end of the previously
/// consumed token.
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind. Forwards to Sema.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
/// isTokenParen - Return true if the current token is '(' or ')'.
bool isTokenParen() const {
  return Tok.is(tok::l_paren) || Tok.is(tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
/// isTokenBracket - Return true if the current token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.is(tok::l_square) || Tok.is(tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
/// isTokenBrace - Return true if the current token is '{' or '}'.
bool isTokenBrace() const {
  return Tok.is(tok::l_brace) || Tok.is(tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
/// isTokenStringLiteral - True if this token is any string-literal kind.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
/// isTokenSpecial - True if this token requires one of the dedicated
/// Consume* methods rather than plain ConsumeToken().
bool isTokenSpecial() const {
  if (isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
      isTokenBrace())
    return true;
  return Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;
  // Reinject Consumed, lex it back into Tok, then reinject the saved
  // lookahead so the stream order becomes: Consumed, Next, ...
  PP.EnterToken(Consumed, /*IsReinject*/true);
  PP.Lex(Tok);
  PP.EnterToken(Next, /*IsReinject*/true);
}
/// Consume an annotation token; returns the annotation's start location.
/// Note PrevTokLocation is set to the annotation's *end* location, since an
/// annotation covers a multi-token range.
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    // Clear angle-bracket candidates while the count still reflects the
    // depth being closed, then decrement.
    AngleBrackets.clear(*this);
    --ParenCount;       // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    // Clear angle-bracket candidates while the count still reflects the
    // depth being closed, then decrement.
    AngleBrackets.clear(*this);
    --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    // Clear angle-bracket candidates while the count still reflects the
    // depth being closed, then decrement.
    AngleBrackets.clear(*this);
    --BraceCount;       // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
///\ brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  // At EOF there is nothing to look ahead to; keep returning eof.
  if (N == 0 || Tok.is(tok::eof)) return Tok;
  return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
  return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
/// Store a parsed type into an annotation token's opaque value slot.
static void setTypeAnnotation(Token &Tok, ParsedType T) {
  Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// Read a non-type (declaration) annotation out of an annotation token.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
  return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
/// Store a non-type (declaration) annotation into an annotation token.
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
  Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
  // A nested-name-specifier may begin with an identifier, '::', 'decltype',
  // '__super', or a template-id that is followed by '::'.
  if (Tok.is(tok::identifier) || Tok.is(tok::coloncolon))
    return true;
  if (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon))
    return true;
  return Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
/// Annotate a C++ scope token if one might be present; returns false (no
/// error) when the current token cannot start a nested-name-specifier.
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
  if (!MightBeCXXScopeToken())
    return false;
  return TryAnnotateCXXScopeToken(EnteringContext);
}
private:
/// Result of an attempt to annotate a name token (see TryAnnotateName).
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
/// Attempt to classify/annotate the name at the current token, optionally
/// using CCC for typo correction.
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  // Fast path: these identifiers are only special in AltiVec/ZVector modes.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  // Only 'vector', 'bool', and (AltiVec-only) 'pixel' are context-sensitive.
  IdentifierInfo *II = Tok.getIdentifierInfo();
  bool IsCandidate = II == Ident_vector || II == Ident_bool ||
                     (getLangOpts().AltiVec && II == Ident_pixel);
  if (!IsCandidate)
    return false;
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  // 'vector' is only special in AltiVec/ZVector language modes.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
// Parser state captured at construction so Revert() can restore it verbatim.
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
// True from construction until Commit() or Revert() ends the transaction.
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
// Have the preprocessor start caching tokens so Backtrack() can replay them.
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
// Keep the consumed tokens; only the tentatively-declared-identifier list
// is trimmed back to its saved size.
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
// Rewind the token stream and restore every piece of parser state captured
// in the constructor.
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
// Unconditionally rewinds on scope exit; Commit() is intentionally hidden
// by the private base.
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
// The Objective-C container decl active at construction, or null if the
// parser was not inside a container.
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
// Temporarily exit the container context; re-entered in the destructor.
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // Construct a new object to manage a scope in the parser Self where the
  // new Scope is created with the flags ScopeFlags, but only when we aren't
  // about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (!EnteredScope || BeforeCompoundStmt) {
      // No scope is entered; a null Self marks this object as inert.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
    } else {
      Self->EnterScope(ScopeFlags);
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }

  ~ParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
// The scope whose flags are modified, and the flags to restore on exit.
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
// NOTE(review): ctor/dtor are defined out of line; presumably the dtor
// restores OldFlags when ManageFlags was set -- confirm in the .cpp file.
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit DiagID at the location of the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
/// Combine SkipUntilFlags values with '|' while keeping the enum type.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
/// Two-token convenience overload; stops at whichever of T1/T2 comes first.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
/// Three-token convenience overload.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
// Hooks for each kind of late-parsed entity; subclasses override the one(s)
// that apply to them (bodies are defined out of line).
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
// Declarations this attribute applies to once parsed (see addDecl).
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
// Take ownership of the cached pragma tokens (swaps with the source).
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class being parsed; requires a non-empty ClassStack.
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
/// Pushes the class via Parser::PushParsingClass on construction and
/// guarantees a matching PopParsingClass by destruction time.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {}

  /// Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Pop automatically unless the caller already did so explicitly.
    if (!Popped)
      Pop();
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
// Default: not a template at all.
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
// A template declaration or explicit specialization, depending on
// isSpecialization.
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
// An explicit instantiation ('[extern] template' without parameter lists).
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Single-token convenience overload: forwards to the two-token overload
/// below with T1 passed for both stop tokens.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// ParsedAttributes extended with the source range covering the attributes.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
// Clears both the attribute list and the cached range.
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
/// ParsedAttributesView extended with the source range of the attributes.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII state for parsing an Objective-C @implementation: registers itself
/// as the parser's current implementation context and collects methods whose
/// bodies are parsed late.
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
// Register as the active @implementation; cleared/consumed elsewhere
// (destructor is defined out of line).
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
// Set by finish(); queried via isFinished().
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
/// If an angle bracket is currently being tracked, delegate to the
/// two-argument overload for \p OpToken; otherwise there is nothing to check.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  auto *Info = AngleBrackets.getCurrent(*this);
  if (!Info)
    return false;
  return checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
///         assignment-expression
///         '{' ...
ExprResult ParseInitializer() {
  // A braced-init-list is the only initializer form that does not begin an
  // assignment-expression here.
  if (Tok.is(tok::l_brace))
    return ParseBraceInitializer();
  return ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
llvm::function_ref<void(const Designation &)> CodeCompleteCB);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior of this __if_exists or __if_not_exists block
/// should.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  // Fully-covered switch (no default) so that adding a DeclSpecContext
  // enumerator triggers a -Wswitch warning here.
  switch (DSC) {
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_template_type_arg:
    return true;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  // Fully-covered switch (no default) so that adding a DeclSpecContext
  // enumerator triggers a -Wswitch warning here.
  switch (DSC) {
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc; // Location of the ':' separating decl and range.
  ExprResult RangeExpr;    // The range expression after the colon.
  // True once an actual for-range declaration (with its ':') was parsed.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar; // The loop-variable declaration statement.
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationSpecifier() == TPResult::True;
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationStatement();
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Let Sema know a loop is starting before disambiguating, for OpenMP.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // C has no type-id/expression ambiguity here.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool IsAmbiguous;
  return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool Ambiguous;
  return isCXXTypeId(Context, Ambiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True,      // Disambiguated in favor of the construct.
  False,     // Disambiguated against the construct.
  Ambiguous, // Still ambiguous; more tentative parsing is needed.
  Error      // A parsing error occurred during disambiguation.
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' begins a standard attribute; a lone '[' is something else.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// Diagnose a C++11 attribute appearing at the current position when its
/// correct location is \p CorrectLocation. No-op unless the current tokens
/// actually begin '[[' or 'alignas'.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  bool AtCXX11Attr =
      (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
      Tok.is(tok::kw_alignas);
  if (AtCXX11Attr)
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
/// Overload for attribute views: diagnoses and drops the attributes without
/// releasing their backing storage (clearListOnly).
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If the current token begins a GNU __attribute__ list, parse it and
/// attach the result to declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes attrs(AttrFactory);
  SourceLocation endLoc;
  ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
  D.takeAttributes(attrs, endLoc);
}
/// If the current token begins a GNU __attribute__ list, parse it into
/// \p attrs; otherwise do nothing.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If [[]] attributes are enabled and the current tokens begin a C++11
/// attribute-specifier, parse it and attach the result to declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange attrs(AttrFactory);
  SourceLocation endLoc;
  ParseCXX11Attributes(attrs, &endLoc);
  D.takeAttributes(attrs, endLoc);
}
/// If a C++11 attribute-specifier begins here (and standard attributes
/// are allowed), parse it into a ranged scratch list and merge the
/// result into \p attrs.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange Scratch(AttrFactory);
  ParseCXX11Attributes(Scratch, endLoc);
  attrs.takeAllFrom(Scratch);
}
/// If a C++11 attribute-specifier begins here (and standard attributes
/// are allowed), parse it directly into \p attrs.
/// \p OuterMightBeMessageSend is forwarded to the disambiguation check.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed())
    return;
  if (!isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    return;
  ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// Parse Microsoft '[...]' attributes into \p attrs, but only when the
/// Microsoft extensions are enabled and the current token is '['.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt)
    return;
  if (!Tok.is(tok::l_square))
    return;
  ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// Parse '__declspec(...)' specifiers into \p Attrs when the declspec
/// keyword is enabled and the current token is '__declspec'.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens (true when OpenCL is disabled, since
/// nothing was parsed).
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (!getLangOpts().OpenCL)
    return true;
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype,
SourceLocation SwiftNewtypeLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload: classify the current token (Tok) by delegating
/// to the Token-taking overload of isCXX11VirtSpecifier.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;         // Parser whose scope stack is manipulated.
  CXXScopeSpec &SS;  // Scope specifier naming the declarator's scope.
  bool EnteredScope; // Set when Sema successfully entered the C++ scope.
  bool CreatedScope; // Set once a parser scope has been pushed.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
  // Push a parser scope, then ask Sema to enter the declarator's C++
  // scope. CreatedScope is set before EnterScope so the destructor pops
  // it even if the Sema call fails. NOTE(review): EnteredScope is only
  // set when ActOnCXXEnterDeclaratorScope returns false, which appears
  // to be the success case -- confirm against Sema's documentation.
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }
  // Undo in reverse order: leave the C++ scope (if entered), then pop
  // the parser scope (if pushed).
  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// One 'namespace [inline] identifier' component of a (possibly nested)
/// namespace definition, as collected while parsing.
struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc; // Location of the 'namespace' keyword.
  SourceLocation InlineLoc;    // Location of 'inline', if present.
  SourceLocation IdentLoc;     // Location of the namespace identifier.
  IdentifierInfo *Ident;       // The namespace identifier itself.
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// Holds the pieces of a single using-declarator while a
/// using-declaration is parsed: optional 'typename', a scope specifier,
/// the introduced name, and an optional pack-expansion ellipsis.
struct UsingDeclarator {
  SourceLocation TypenameLoc; // Location of 'typename', if present.
  CXXScopeSpec SS;            // Nested-name-specifier before the name.
  UnqualifiedId Name;         // The name being introduced.
  SourceLocation EllipsisLoc; // Location of '...', if present.
  // Reset all fields so the object can be reused for the next declarator.
  void clear() {
    TypenameLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitInfo::OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitInfo::OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitInfo::OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitInfo::OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitInfo::OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitInfo::OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  Expr *TailExpr = nullptr; // Trailing expression of the clause, if any.
  SourceLocation ColonLoc;  // Location of the ':' separator, if present.
  SourceLocation RLoc;      // Location of the closing ')'.
  // Scope and name of a reduction/mapper identifier, when the clause has one.
  CXXScopeSpec ReductionOrMapperIdScopeSpec;
  DeclarationNameInfo ReductionOrMapperId;
  int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
  ///< lastprivate clause.
  // Map-type-modifiers seen on a map clause, with their locations.
  SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
  MapTypeModifiers;
  SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
  MapTypeModifiersLoc;
  bool IsMapTypeImplicit = false; // True when no map type was written.
  // NOTE(review): presumably the location of the depend/linear/map/
  // lastprivate modifier -- confirm against ParseOpenMPVarList.
  SourceLocation DepLinMapLastLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
NamedDecl *
ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
/// Bitmask of the qualifiers ('volatile', 'inline', 'goto') parsed on a
/// GNU inline-asm statement.
class GNUAsmQualifiers {
  unsigned Qualifiers = AQ_unspecified; // OR-combination of AQ flags.
public:
  // Individual qualifier flags; powers of two so they combine in the mask.
  enum AQ {
    AQ_unspecified = 0,
    AQ_volatile = 1,
    AQ_inline = 2,
    AQ_goto = 4,
  };
  /// Source spelling of a single qualifier flag.
  static const char *getQualifierName(AQ Qualifier);
  /// Records a qualifier in the mask. NOTE(review): the bool result
  /// presumably signals an error (e.g. a duplicate) -- confirm against
  /// the out-of-line definition.
  bool setAsmQualifier(AQ Qualifier);
  inline bool isVolatile() const { return Qualifiers & AQ_volatile; };
  inline bool isInline() const { return Qualifiers & AQ_inline; };
  inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
THTensorMath.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorMath.c"
#else
#ifndef NAN
#define NAN (nan(NULL))
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#define TH_OMP_OVERHEAD_THRESHOLD 100000
#ifdef _OPENMP
/* PRAGMA(P) emits a pragma from inside a macro expansion: the standard
   C99 _Pragma operator on most compilers, MSVC's __pragma keyword on
   Windows. */
#ifndef _WIN32
#define PRAGMA(P) _Pragma(#P)
#else
#define PRAGMA(P) __pragma(P)
#endif
/* TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE):
   Runs CODE over the contiguous storage of TENSOR. Under OpenMP the
   element range is split into equal chunks per thread (the last thread
   also takes the division remainder); the parallel region is entered
   only when the tensor has more than TH_OMP_OVERHEAD_THRESHOLD elements
   and we are not already inside a parallel region. CODE sees
   TENSOR_data (pointer to this thread's chunk) and TENSOR_len (its
   length). */
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
  int inOmp = omp_in_parallel(); \
  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR); \
  PRAGMA(omp parallel if ((TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOmp))) \
  { \
    size_t num_threads = omp_get_num_threads(); \
    size_t tid = omp_get_thread_num(); \
    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
    ptrdiff_t TENSOR##_len = TH_TENSOR_end - TH_TENSOR_offset; \
    TYPE *TENSOR##_data = THTensor_(data)(TENSOR) + TH_TENSOR_offset; \
    CODE \
  } \
}
#else
/* Serial fallback: CODE sees the whole tensor as a single chunk. */
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
  TYPE *TENSOR##_data = THTensor_(data)(TENSOR); \
  ptrdiff_t TENSOR##_len = THTensor_(nElement)(TENSOR); \
  CODE \
}
#endif
#ifdef _OPENMP
/* TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE):
   Runs CODE over two contiguous tensors in lockstep. The range is
   partitioned by TENSOR1's element count (the last thread takes the
   remainder), and the parallel region is entered only above
   TH_OMP_OVERHEAD_THRESHOLD elements when not already nested. CODE sees
   TENSOR1_data / TENSOR2_data offset to this thread's chunk and
   TENSOR1_len as the chunk length. NOTE(review): assumes TENSOR2 has at
   least as many elements as TENSOR1 -- callers must guarantee this. */
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
  int inOmp = omp_in_parallel(); \
  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
  PRAGMA(omp parallel if ((TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOmp))) \
  { \
    size_t num_threads = omp_get_num_threads(); \
    size_t tid = omp_get_thread_num(); \
    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
    ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
    TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
    TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
    CODE \
  } \
}
#else
/* Serial fallback: both tensors exposed as whole-length chunks. */
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
  TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
  TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
  ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
  CODE \
}
#endif
#ifdef _OPENMP
/* TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE):
   Same chunked-parallel scheme as TH_TENSOR_APPLY2_CONTIG, but over
   three contiguous tensors; the partition is again driven by TENSOR1's
   element count, and CODE sees all three *_data pointers offset to the
   thread's chunk plus TENSOR1_len as the chunk length. */
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
  int inOmp = omp_in_parallel(); \
  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
  PRAGMA(omp parallel if ((TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOmp))) \
  { \
    size_t num_threads = omp_get_num_threads(); \
    size_t tid = omp_get_thread_num(); \
    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
    ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
    TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
    TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
    TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3) + TH_TENSOR_offset; \
    CODE \
  } \
}
#else
/* Serial fallback: all three tensors exposed as whole-length chunks. */
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
  TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
  TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
  TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3); \
  ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
  CODE \
}
#endif
/* TH_CHECK_SAME_SIZE(TENSOR1, TENSOR2): raise a THError -- with both
   shapes rendered into the message -- unless the two tensors have the
   same size in every dimension. */
#define TH_CHECK_SAME_SIZE(TENSOR1, TENSOR2) \
{ \
  if(!THTensor_(isSameSizeAs)(TENSOR1, TENSOR2)) { \
    THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
    THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
    THError("inconsistent tensor size, expected %s %s and %s %s to have the same size", \
      #TENSOR1, T1buff.str, #TENSOR2, T2buff.str); \
  } \
}
// Used for `scatter` and `scatterAdd`
// Assumes TENSOR1 is real
// TENSOR2 is src
// TENSOR3 is index
// Tests:
// 1. index->size[d] <= src->size[d] for all d
// 2. index->size[d] <= real->size[d] for all d != dim
// NOTE(review): the loop variable TH_TENSOR_DIM_APPLY_i is not declared
// here, so this macro presumably expands inside a TH_TENSOR_DIM_APPLY*
// context that provides it -- confirm at the use sites.
#define TH_TENSOR_DIM_APPLY3_SIZE_SCATTER(TENSOR1, TENSOR2, TENSOR3, DIMENSION) \
{ \
  int shape_check_flag = 0; \
  for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \
  { \
    int64_t TENSOR3##_dim_size = TENSOR3->size[TH_TENSOR_DIM_APPLY_i]; \
    if (TH_TENSOR_DIM_APPLY_i != DIMENSION) { \
      if (TENSOR3##_dim_size > TENSOR1->size[TH_TENSOR_DIM_APPLY_i]) { \
        shape_check_flag = 1; \
        break; \
      } \
    } \
    if (TENSOR3##_dim_size > TENSOR2->size[TH_TENSOR_DIM_APPLY_i]) { \
      shape_check_flag = 1; \
      break; \
    } \
  } \
  if (shape_check_flag == 1) { \
    THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
    THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
    THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \
    THError("Expected %s %s to be smaller size than %s %s and to be smaller than %s %s apart from dimension %d", \
      #TENSOR3, T3buff.str, #TENSOR2, T2buff.str, #TENSOR1, T1buff.str, DIMENSION); \
  } \
}
/*
 * Scalar power x^y for the element type `real`. Floating-point types
 * defer to the C library pow/powf; integral types use binary
 * exponentiation (square-and-multiply) and reject negative exponents
 * via THArgCheck.
 */
static inline real THTensor_(powOne)(real x, real y) {
#if defined(TH_REAL_IS_FLOAT)
  return powf(x, y);
#elif defined(TH_REAL_IS_DOUBLE)
  return pow(x, y);
#else
  THArgCheck(y >= 0, 1,
     "Integers to negative integer powers are not allowed");
  real acc = 1;
  for (; y != 0; y /= 2, x *= x) {
    if (y & 1) {
      acc *= x;
    }
  }
  return acc;
#endif
}
/* Set every element of r_ to `value`. */
void THTensor_(fill)(THTensor *r_, real value)
{
  /* Fast path: contiguous (or transposed) tensors are filled with one
     vectorized call per chunk. */
  if (THTensor_(isContiguous)(r_) || THTensor_(isTransposed)(r_)) {
    TH_TENSOR_APPLY_CONTIG(real, r_, THVector_(fill)(r__data, value, r__len););
  } else {
    TH_TENSOR_APPLY(real, r_,
      /* When the innermost dimension has unit stride, fill the whole
         run vectorized, advance the apply cursor past it, and break out
         of this innermost-iteration step; otherwise fill one element. */
      if (r__stride == 1) {
        THVector_(fill)(r__data, value, r__size);
        r__i = r__size;
        r__data += r__stride * r__size;
        break;
      } else {
        *r__data = value;
      }
    );
  }
}
/* Set every element of r_ to zero by delegating to fill. */
void THTensor_(zero)(THTensor *r_)
{
  THTensor_(fill)(r_, 0);
}
/* Set tensor[i] = value at every position where mask[i] == 1. The mask
   must contain only 0/1 values; any value > 1 raises an error. */
void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
{
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
    if (*mask_data > 1)
    {
      /* Free the iteration counters allocated by TH_TENSOR_APPLY2
         before raising, since the error path never returns here. */
      THFree(mask_counter);
      THFree(tensor_counter);
      THError("Mask tensor can take 0 and 1 values only");
    }
    else if (*mask_data == 1)
    {
      *tensor_data = value;
    });
}
/* Copy elements of src, in order, into the positions of tensor where
   mask == 1. tensor and mask must have the same element count, and src
   must provide at least as many elements as mask has ones. */
void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
{
  /* Work from a contiguous copy of src so it can be read linearly. */
  THTensor *srct = THTensor_(newContiguous)(src);
  real *src_data = THTensor_(data)(srct);
  ptrdiff_t cntr = 0;  /* elements consumed from src so far */
  ptrdiff_t nelem = THTensor_(nElement)(srct);
  if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask))
  {
    THTensor_(free)(srct);
    THError("Number of elements of destination tensor != Number of elements in mask");
  }
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
    if (*mask_data > 1)
    {
      /* Clean up before raising: release the contiguous copy and the
         iteration counters allocated by TH_TENSOR_APPLY2. */
      THTensor_(free)(srct);
      THFree(mask_counter);
      THFree(tensor_counter);
      THError("Mask tensor can take 0 and 1 values only");
    }
    else if (*mask_data == 1)
    {
      /* src exhausted before every masked position was filled. */
      if (cntr == nelem)
      {
        THTensor_(free)(srct);
        THFree(mask_counter);
        THFree(tensor_counter);
        THError("Number of elements of src < number of ones in mask");
      }
      *tensor_data = *src_data;
      src_data++;
      cntr++;
    });
  THTensor_(free)(srct);
}
/* Gather the elements of src where mask == 1 into `tensor`, which is
 * resized to a 1-D tensor of length sum(mask). Mask values other than
 * 0/1 are rejected. */
void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask)
{
ptrdiff_t numel = THByteTensor_sumall(mask);
real *tensor_data;
#ifdef DEBUG
THAssert(numel <= LONG_MAX);
#endif
THTensor_(resize1d)(tensor,numel);
tensor_data = THTensor_(data)(tensor);
TH_TENSOR_APPLY2(real, src, unsigned char, mask,
if (*mask_data > 1)
{
/* release APPLY2 counters before raising */
THFree(mask_counter);
THFree(src_counter);
THError("Mask tensor can take 0 and 1 values only");
}
else if (*mask_data == 1)
{
*tensor_data = *src_data;
tensor_data++;
});
}
// Finds non-zero elements of a tensor and returns their subscripts.
// Two passes: the first counts non-zeros to size `subscript`
// (numel x nDimension), the second decodes each linear position into
// per-dimension coordinates. For half precision, "non-zero" is any
// bit pattern other than +/-0 (sign bit masked off).
void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
{
ptrdiff_t numel = 0;
int64_t *subscript_data;
int64_t i = 0;
int64_t dim;
int64_t div = 1;
#ifdef TH_REAL_IS_HALF
#define IS_NONZERO(val) ((val.x & 0x7fff) != 0)
#else
#define IS_NONZERO(val) ((val)!=0)
#endif
/* First Pass to determine size of subscripts */
TH_TENSOR_APPLY(real, tensor,
if IS_NONZERO(*tensor_data) {
++numel;
});
#ifdef DEBUG
THAssert(numel <= LONG_MAX);
#endif
THLongTensor_resize2d(subscript, numel, tensor->nDimension);
/* Second pass populates subscripts */
subscript_data = THLongTensor_data(subscript);
TH_TENSOR_APPLY(real, tensor,
if IS_NONZERO(*tensor_data) {
/* decode linear index i into coordinates, innermost dim first */
div = 1;
for (dim = tensor->nDimension - 1; dim >= 0; dim--) {
*(subscript_data + dim) = (i/div) % tensor->size[dim];
div *= tensor->size[dim];
}
subscript_data += tensor->nDimension;
}
++i;);
}
/* tensor = src indexed along `dim` by the vector `index` (TH_INDEX_BASE
 * based). tensor is resized so size[dim] == numel(index). Fast path for
 * dim==0 with contiguous src/tensor copies whole rows with memcpy (or
 * scalars for 1-D src), parallelized with OpenMP; otherwise falls back
 * to per-index select+copy of slices. */
void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
ptrdiff_t i, numel;
THLongStorage *newSize;
THTensor *tSlice, *sSlice;
int64_t *index_data;
real *tensor_data, *src_data;
THArgCheck(index->nDimension <= 1, 3, "Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
THArgCheck(src->nDimension > 0, 2, "Source tensor is empty");
numel = THLongTensor_nElement(index);
newSize = THLongStorage_newWithSize(src->nDimension);
THLongStorage_rawCopy(newSize,src->size);
#ifdef DEBUG
THAssert(numel <= LONG_MAX);
#endif
newSize->data[dim] = numel;
THTensor_(resize)(tensor,newSize,NULL);
THLongStorage_free(newSize);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor))
{
tensor_data = THTensor_(data)(tensor);
src_data = THTensor_(data)(src);
ptrdiff_t rowsize = THTensor_(nElement)(src) / src->size[0];
// check that the indices are within range
int64_t max = src->size[0] - 1 + TH_INDEX_BASE;
for (i=0; i<numel; i++) {
if (index_data[i] < TH_INDEX_BASE || index_data[i] > max) {
THLongTensor_free(index);
THError("index out of range");
}
}
if (src->nDimension == 1) {
/* scalar gather */
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<numel; i++)
tensor_data[i] = src_data[index_data[i] - TH_INDEX_BASE];
} else {
/* row gather: each index selects a full contiguous row */
#pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<numel; i++)
memcpy(tensor_data + i*rowsize, src_data + (index_data[i] - TH_INDEX_BASE)*rowsize, rowsize*sizeof(real));
}
}
else if (src->nDimension == 1)
{
for (i=0; i<numel; i++)
THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i] - TH_INDEX_BASE));
}
else
{
/* general path: copy one selected slice per index */
for (i=0; i<numel; i++)
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor, dim, i);
THTensor_(select)(sSlice, src, dim, index_data[i] - TH_INDEX_BASE);
THTensor_(copy)(tSlice, sSlice);
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
}
THLongTensor_free(index);
}
/* Copy slice i of src into slice index[i] of tensor along `dim`
 * (TH_INDEX_BASE based). 1-D tensors use scalar set/get; higher ranks
 * reuse two scratch slice views across iterations. Argument validation
 * is done by the caller (see note below). */
void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
ptrdiff_t i, numel;
THTensor *tSlice, *sSlice;
int64_t *index_data;
// Error checking for this function has moved to ATen!!
numel = THLongTensor_nElement(index);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (tensor->nDimension > 1 )
{
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
for (i=0; i<numel; i++)
{
THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
THTensor_(select)(sSlice, src, dim, i);
THTensor_(copy)(tSlice, sSlice);
}
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
else
{
for (i=0; i<numel; i++)
{
THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i));
}
}
THLongTensor_free(index);
}
/* Translate a linear element index into the physical storage offset of
 * a (possibly non-contiguous) tensor, peeling one coordinate per
 * dimension starting from the innermost. */
static ptrdiff_t THTensor_(dataOffset)(THTensor* tensor, ptrdiff_t linearIndex) {
  int64_t *sizes = tensor->size;
  int64_t *strides = tensor->stride;
  ptrdiff_t offset = 0;
  int dim = tensor->nDimension;
  while (dim-- > 0) {
    ptrdiff_t coord = linearIndex % sizes[dim];
    offset += coord * strides[dim];
    linearIndex /= sizes[dim];
  }
  return offset;
}
/* Validate a (possibly negative, end-relative) linear index: must lie
 * in [-numel, numel). Raises via THArgCheck otherwise. */
static inline void THTensor_(checkLinearIndex)(int64_t linearIndex, int64_t numel) {
THArgCheck(linearIndex < numel && linearIndex >= -numel, 2, "out of range: %d out of %d", (int)linearIndex, (int)numel);
}
/* Normalize an end-relative index: negative values count back from
 * numel. Caller is expected to have range-checked the index first. */
static inline int64_t THTensor_(wrapLinearIndex)(int64_t linearIndex, int64_t numel) {
  if (linearIndex < 0) {
    return linearIndex + numel;
  }
  return linearIndex;
}
/* r_ = src viewed as flat, gathered at the linear indices in `index`
 * (negative indices count from the end). r_ takes index's shape.
 * The gather is OpenMP-parallel; since errors must not be raised inside
 * the parallel region, the first invalid position is recorded with an
 * atomic CAS and reported after the loop. */
void THTensor_(take)(THTensor *r_, THTensor *src, THLongTensor *index)
{
THTensor_(resizeNd)(r_, index->nDimension, index->size, NULL);
THTensor* dst = THTensor_(newContiguous)(r_);
index = THLongTensor_newContiguous(index);
int64_t* index_data = THLongTensor_data(index);
ptrdiff_t srcElements = THTensor_(nElement)(src);
real* src_data = THTensor_(data)(src);
real* dst_data = THTensor_(data)(dst);
ptrdiff_t nIndices = THLongTensor_nElement(index);
int isContiguous = THTensor_(isContiguous)(src);
// Exceptions must not be thrown across OpenMP parallel sections, so we
// record the position of the invalid index and throw the exception after the
// loop.
int64_t invalidIdxPos = -1;
ptrdiff_t i;
#pragma omp parallel for if(nIndices > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i = 0; i < nIndices; i++) {
int64_t idx = index_data[i];
if (idx < srcElements && idx >= -srcElements) {
idx = THTensor_(wrapLinearIndex)(idx, srcElements);
if (isContiguous) {
dst_data[i] = src_data[idx];
} else {
/* non-contiguous src: map linear index to physical offset */
dst_data[i] = src_data[THTensor_(dataOffset)(src, idx)];
}
} else {
/* remember the first out-of-range position (CAS keeps one winner) */
THAtomicCompareAndSwapLong(&invalidIdxPos, -1, i);
}
}
if (invalidIdxPos >= 0) {
/* re-run the range check serially so it raises the proper error */
THTensor_(checkLinearIndex)(index_data[invalidIdxPos], srcElements);
}
THLongTensor_free(index);
THTensor_(freeCopyTo)(dst, r_);
}
/* Scatter src into tensor (viewed as flat) at the linear indices in
 * `index`; negative indices count from the end. When accumulate is
 * nonzero, values are added instead of overwritten. index and src must
 * have the same element count. */
void THTensor_(put)(THTensor *tensor, THLongTensor *index, THTensor *src, int accumulate)
{
THArgCheck(THLongTensor_nElement(index) == THTensor_(nElement)(src), 3,
"src should have the same number of elements as index");
index = THLongTensor_newContiguous(index);
src = THTensor_(newContiguous)(src);
real* data = THTensor_(data)(tensor);
ptrdiff_t numel = THTensor_(nElement)(tensor);
int is_contiguous = THTensor_(isContiguous)(tensor);
TH_TENSOR_APPLY2(int64_t, index, real, src,
THTensor_(checkLinearIndex)(*index_data, numel);
int64_t linearIndex = THTensor_(wrapLinearIndex)(*index_data, numel);
/* non-contiguous destinations need linear->physical translation */
int64_t dataOffset = is_contiguous ? linearIndex : THTensor_(dataOffset)(tensor, linearIndex);
if (accumulate) {
data[dataOffset] += *src_data;
} else {
data[dataOffset] = *src_data;
}
);
THTensor_(free)(src);
THLongTensor_free(index);
}
/* Accumulate slice i of src into slice index[i] of tensor along `dim`
 * (TH_INDEX_BASE based). Requires index to be a vector whose length
 * matches src->size[dim]. Duplicate indices accumulate repeatedly. */
void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
ptrdiff_t i, numel;
THTensor *tSlice, *sSlice;
int64_t *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
if (tensor->nDimension > 1)
{
/* reuse two scratch slice views; cadd does slice += 1.0 * src-slice */
tSlice = THTensor_(new)();
sSlice = THTensor_(new)();
for (i=0; i<numel; i++)
{
THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
THTensor_(select)(sSlice, src, dim, i);
THTensor_(cadd)(tSlice, tSlice, 1.0, sSlice);
}
THTensor_(free)(tSlice);
THTensor_(free)(sSlice);
}
else
{
for (i=0; i<numel; i++)
{
THTensor_(set1d)(tensor,
index_data[i] - TH_INDEX_BASE,
THTensor_(get1d)(src,i) + THTensor_(get1d)(tensor,index_data[i] - TH_INDEX_BASE));
}
}
THLongTensor_free(index);
}
/* Fill the slices of tensor selected along `dim` by the index vector
 * with the scalar `val` (TH_INDEX_BASE based indices). */
void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
ptrdiff_t i, numel;
THTensor *tSlice;
int64_t *index_data;
numel = THLongTensor_nElement(index);
THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
index = THLongTensor_newContiguous(index);
index_data = THLongTensor_data(index);
for (i=0; i<numel; i++)
{
if (tensor->nDimension > 1)
{
/* fill one selected slice */
tSlice = THTensor_(new)();
THTensor_(select)(tSlice, tensor,dim,index_data[i] - TH_INDEX_BASE);
THTensor_(fill)(tSlice, val);
THTensor_(free)(tSlice);
}
else
{
THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, val);
}
}
THLongTensor_free(index);
}
/* tensor[...][i][...] = src[...][index[...][i][...]][...] along `dim`.
 * tensor, src and index must have the same rank; along `dim`, tensor
 * matches index. Out-of-range indices raise after freeing the APPLY
 * counter buffer. */
void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
int64_t elems_per_row, i, idx;
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4,
"Index tensor must have same dimensions as input tensor");
THArgCheck(dim >= 0 && dim < THTensor_(nDimension)(tensor), 3,
"Index dimension is out of bounds");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim,
TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= src_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in gather");
}
*(tensor_data + i*tensor_stride) = src_data[(idx - TH_INDEX_BASE) * src_stride];
})
}
/* Inverse of gather: tensor[...][index[...][i][...]][...] =
 * src[...][i][...] along `dim`. All three tensors share rank; indices
 * outside tensor's extent along `dim` raise after freeing the APPLY
 * counter buffer. */
void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
int64_t elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim,
TH_TENSOR_DIM_APPLY3_SIZE_SCATTER,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in scatter");
}
tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = *(src_data + i*src_stride);
})
}
/* Like scatter, but accumulates: tensor[...][index[...][i][...]][...] +=
 * src[...][i][...] along `dim`. Duplicate indices add repeatedly. */
void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
int64_t elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim,
TH_TENSOR_DIM_APPLY3_SIZE_SCATTER,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in scatterAdd");
}
tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] += *(src_data + i*src_stride);
})
}
/* Like scatter, but writes the scalar `val` instead of values from a
 * source tensor: tensor[...][index[...][i][...]][...] = val along `dim`.
 * tensor and index must share rank; out-of-range indices raise after
 * freeing the APPLY counter buffer.
 * Fix: the error message previously said "Invalid index in scatter",
 * which misattributed failures to THTensor_(scatter); it now names this
 * function, consistent with scatterAdd. */
void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
int64_t elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY2(real, tensor, int64_t, index, dim,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in scatterFill");
}
tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = val;
})
}
/* Dot product of two tensors (flattened). The "trick": instead of
 * element-by-element iteration, each pass hands the longest common
 * strided run to BLAS dot, then manually advances the APPLY2 cursors
 * (tensor_i/src_i and the data pointers) past that run before breaking
 * back to the macro's outer loop. */
accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
{
accreal sum = 0;
/* we use a trick here. careful with that. */
TH_TENSOR_APPLY2(real, tensor, real, src,
int64_t sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
tensor_i += sz;
src_i += sz;
tensor_data += sz*tensor_stride;
src_data += sz*src_stride;
break;);
return sum;
}
/* th_isnan(val): NaN test; true NaN check for float/double `real`,
 * constant false for integral types (which cannot hold NaN). */
#undef th_isnan
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan(val) \
(std::isnan(val))
#else
#define th_isnan(val) (0)
#endif
/* th_isnan_break(val): inside an APPLY loop body, stop scanning once a
 * NaN is seen (floating types); expands to nothing for integral types. */
#undef th_isnan_break
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan_break(val) \
if (std::isnan(val)) break;
#else
#define th_isnan_break(val)
#endif
/* Minimum over all elements. The negated comparison !(value >= theMin)
 * is true for NaN as well, so a NaN propagates into the result; once a
 * NaN is captured, th_isnan_break stops the scan early. Requires a
 * non-empty tensor. */
real THTensor_(minall)(THTensor *tensor)
{
real theMin;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMin = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value<theMin in the case of NaNs */
if(!(value >= theMin))
{
theMin = value;
th_isnan_break(value)
});
return theMin;
}
/* Maximum over all elements; mirror image of minall, with the same
 * NaN-propagating comparison and early exit once a NaN is captured. */
real THTensor_(maxall)(THTensor *tensor)
{
real theMax;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMax = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value>theMax in the case of NaNs */
if(!(value <= theMax))
{
theMax = value;
th_isnan_break(value)
});
return theMax;
}
/* Forward declaration: in-place quickselect without index tracking,
 * defined later in this file. */
static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, int64_t stride);
/* Median over all elements: the lower median, element k = (numel-1)/2
 * of the sorted order. Works on a clone so the input is not mutated
 * by the in-place quickselect. Requires a non-empty tensor. */
real THTensor_(medianall)(THTensor *tensor)
{
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
real theMedian;
ptrdiff_t numel;
int64_t k;
THTensor *temp_;
real *temp__data;
numel = THTensor_(nElement)(tensor);
k = (numel-1) >> 1;
temp_ = THTensor_(newClone)(tensor);
temp__data = THTensor_(data)(temp_);
THTensor_(quickselectnoidx)(temp__data, k, numel, 1);
theMedian = temp__data[k];
THTensor_(free)(temp_);
return theMedian;
}
/* Sum of all elements, accumulated in accreal. Uses an OpenMP
 * reduction unless already executing inside a parallel region (nested
 * parallelism is avoided); otherwise falls back to a serial APPLY. */
accreal THTensor_(sumall)(THTensor *tensor)
{
accreal sum = 0;
int serial_path = 0;
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if(inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, +:sum, sum += *tensor_data;);
}
#else
serial_path = 1;
#endif
if (serial_path) {
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
}
return sum;
}
/* Product of all elements, accumulated in accreal (identity 1). Same
 * structure as sumall: OpenMP multiplicative reduction unless nested
 * in a parallel region, else serial APPLY. */
accreal THTensor_(prodall)(THTensor *tensor)
{
accreal prod = 1;
int serial_path = 0;
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if(inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, *:prod, prod *= *tensor_data;);
}
#else
serial_path = 1;
#endif
if (serial_path) {
TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;);
}
return prod;
}
/* r_ = t + value (scalar broadcast). Three tiers: vectorized
 * THVector_(adds) when both tensors are contiguous; OpenMP-parallel
 * APPLY2 when not nested in a parallel region; serial APPLY2 fallback
 * otherwise. r_ is resized to match t. */
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(adds)(r__data, t_data, value, r__len););
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data + value;)
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
}
}
/* r_ = t - value, implemented as addition of the negated scalar. */
void THTensor_(sub)(THTensor *r_, THTensor *t, real value)
{
  const real negated = -value;
  THTensor_(add)(r_, t, negated);
}
/* r_ = t + value*alpha: pre-scale the scalar, then reuse add. */
void THTensor_(add_scaled)(THTensor *r_, THTensor *t, real value, real alpha)
{
  const real scaled = value * alpha;
  THTensor_(add)(r_, t, scaled);
}
/* r_ = t - value*alpha, forwarded through add with the sign flipped. */
void THTensor_(sub_scaled)(THTensor *r_, THTensor *t, real value, real alpha)
{
  const real scaled = -value * alpha;
  THTensor_(add)(r_, t, scaled);
}
/* r_ = t * value (scalar broadcast). Same three-tier dispatch as add:
 * vectorized contiguous path, OpenMP APPLY2 when not nested, serial
 * fallback. r_ is resized to match t. */
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(muls)(r__data, t_data, value, r__len););
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data * value;)
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
}
}
/* r_ = t / value (scalar broadcast). Same three-tier dispatch as add;
 * no zero-divisor check here — behavior follows the element type's
 * division semantics. r_ is resized to match t. */
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(divs)(r__data, t_data, value, r__len););
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data / value;)
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
}
}
/* r_ = t << value (elementwise left shift by a scalar).
 * Float/double emulate the shift as multiplication by 2^value; half is
 * unsupported. Integer types shift through `ureal` (the unsigned
 * counterpart) except byte, which is already unsigned. Dispatch is the
 * usual contiguous / OpenMP / serial three-tier scheme. */
void THTensor_(lshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
return THTensor_(mul)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
return THTensor_(mul)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
return THError("lshift is not supported for torch.HalfTensor");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) << value;
#else
/* shift in unsigned to avoid shifting negative signed values */
rp[i] = ((ureal) tp[i]) << value;
#endif
}
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((real) *t_data) << value););
#else
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((ureal) *t_data) << value););
#endif
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) << value););
#else
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) << value););
#endif
}
#endif
}
/* r_ = t >> value (elementwise right shift by a scalar).
 * Float/double emulate the shift as division by 2^value; half is
 * unsupported. Integer types shift through `ureal` (logical shift)
 * except byte. Same three-tier dispatch as lshift. */
void THTensor_(rshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
return THTensor_(div)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
return THTensor_(div)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
return THError("rshift is not supported for torch.HalfTensor");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) >> value;
#else
/* unsigned cast makes this a logical (zero-fill) shift */
rp[i] = ((ureal) tp[i]) >> value;
#endif
}
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((real) *t_data) >> value););
#else
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((ureal) *t_data) >> value););
#endif
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) >> value););
#else
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) >> value););
#endif
}
#endif
}
/* r_ = fmod(t, value): C-style remainder truncated toward zero.
 * Floating types use fmod(); integer types use %. Three-tier dispatch:
 * contiguous OpenMP loop, APPLY2_OMP when not nested, serial APPLY2. */
void THTensor_(fmod)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
rp[i] = fmod(tp[i], value);
#else
rp[i] = tp[i] % value;
#endif
}
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = fmod(*t_data, value););
#else
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (*t_data % value););
#endif
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = fmod(*t_data, value););
#else
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data % value););
#endif
}
}
/* True when an integer remainder `a` must be wrapped to match the sign
 * of divisor `b`: the remainder is nonzero and the two signs differ. */
static inline bool modulo_wrap(real a, real b) {
  const bool nonzero = (a != 0);
  const bool signs_differ = ((a < 0) != (b < 0));
  return nonzero && signs_differ;
}
/* r_ = t mod value with the result taking the divisor's sign
 * (Python-style remainder, unlike fmod's truncation). Floating types
 * compute t - value*floor(t/value), yielding NAN when value == 0.
 * Integer types compute % and then wrap via modulo_wrap when the signs
 * of remainder and divisor differ. Usual three-tier dispatch. */
void THTensor_(remainder)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
rp[i] = (value == 0)? NAN : tp[i] - value * floor(tp[i] / value);
#else
// There is no NAN for integers
rp[i] = tp[i] % value;
if (modulo_wrap(rp[i], value))
rp[i] += value;
#endif
}
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value););
#else
// There is no NAN for integers
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data % value;
if (modulo_wrap(*r__data, value)) *r__data += value;);
#endif
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value););
#else
// There is no NAN for integers
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data % value;
if (modulo_wrap(*r__data, value)) *r__data += value;);
#endif
}
}
/* r_ = t & value (elementwise bitwise AND with a scalar); integer
 * types only — float/double/half raise. Usual three-tier dispatch. */
void THTensor_(bitand)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
(void)r_;
(void)t;
(void)value;
return THError("bitand is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int serial_path = 0;
int tContig = THTensor_(isContiguous)(t);
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<r_Size; i++) {
rp[i] = tp[i] & value;
}
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data & value;);
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data & value;);
}
#endif
}
/* r_ = t | value (elementwise bitwise OR with a scalar); integer types
 * only — float/double/half raise. Usual three-tier dispatch. */
void THTensor_(bitor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
(void)r_;
(void)t;
(void)value;
return THError("bitor is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<r_Size; i++) {
rp[i] = tp[i] | value;
}
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data | value;);
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data | value;);
}
#endif
}
/* r_ = t ^ value (elementwise bitwise XOR with a scalar); integer
 * types only — float/double/half raise. Usual three-tier dispatch. */
void THTensor_(bitxor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
(void)r_;
(void)t;
(void)value;
return THError("bitxor is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<r_Size; i++) {
rp[i] = tp[i] ^ value;
}
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data ^ value;);
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data ^ value;);
}
#endif
}
/* r_ = clamp(t, min_value, max_value): each element is clipped into
 * [min_value, max_value]. No check that min_value <= max_value; the
 * min test wins when the bounds cross. Usual three-tier dispatch. */
void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int serial_path = 0;
if (r_Contig && tContig) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
/* real t_val; */
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++)
rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]);
} else {
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data););
}
#else
serial_path = 1;
#endif
}
if (serial_path) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data););
}
}
/* r_ = t + value * src (elementwise axpy). When everything is
 * contiguous and r_ aliases t, delegates to BLAS axpy in place;
 * otherwise vectorized APPLY3, OpenMP APPLY3 when not nested, or the
 * serial APPLY3 fallback (also used when src's element count differs
 * from r_'s, letting APPLY3 handle/flag the mismatch). */
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
if(r_ == t) {
/* in-place: r_ += value * src via BLAS */
THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1);
} else {
TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len););
}
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
}
}
/* r_ = t - value * src, expressed as cadd with a negated multiplier. */
void THTensor_(csub)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
  const real negated = -value;
  THTensor_(cadd)(r_, t, negated, src);
}
/* r_ = t * src (elementwise product). Vectorized APPLY3 for fully
 * contiguous operands, OpenMP APPLY3 when not nested, serial APPLY3
 * fallback otherwise (including element-count mismatch with src). */
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cmul)(r__data, t_data, src_data, r__len););
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
}
}
/* r_ = t ** value (elementwise power with a scalar exponent).
 * Common exponents are special-cased: 1 -> copy, 2 -> self-multiply,
 * 3 -> inline cube; for float/double additionally 0.5 -> sqrt,
 * -0.5 -> rsqrt, -1 -> reciprocal, -2 -> 1/x^2, else libm pow.
 * Other element types fall back to the scalar powOne helper. */
void THTensor_(pow)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if(value == 1){
THTensor_(copy)(r_, t);
}
else if(value == 2){
THTensor_(cmul)(r_, t, t);
}
else if(value == 3){
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * *t_data * *t_data;);
}
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#if defined (TH_REAL_IS_FLOAT)
#define TH_MATH_NAME(fn) fn##f
#else
#define TH_MATH_NAME(fn) fn
#endif
else if(value == 0.5){
THTensor_(sqrt)(r_, t);
}
else if(value == -0.5){
THTensor_(rsqrt)(r_, t);
}
else if(value == -1){
THTensor_(cinv)(r_, t);
}
else if(value == -2){
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(1.0) / (*t_data * *t_data););
}
else{
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(pow)(*t_data, value););
}
#undef TH_MATH_NAME
#else
else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = THTensor_(powOne)(*t_data, value););
}
#endif
}
/* r_ = t ** src (elementwise power with tensor exponents), computed
 * through the scalar powOne helper. Contiguous OpenMP loop when all
 * operands are contiguous, APPLY3_OMP when not nested, serial APPLY3
 * fallback otherwise (including element-count mismatch with src). */
void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++)
rp[i] = THTensor_(powOne)(tp[i], sp[i]);
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = THTensor_(powOne)(*t_data, *src_data););
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = THTensor_(powOne)(*t_data, *src_data););
}
}
/* r_ = t / src (elementwise division). Vectorized APPLY3 for fully
 * contiguous operands, OpenMP APPLY3 when not nested, serial APPLY3
 * fallback otherwise; no zero-divisor check. */
void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cdiv)(r__data, t_data, src_data, r__len););
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
}
}
/* Element-wise left shift: r_ = t << src.
   Floating types emulate the shift as t * 2^src; byte tensors shift `real`
   directly; other integer types shift through `ureal` (the unsigned
   counterpart — presumably to avoid UB on negative values; TODO confirm).
   Unsupported for HalfTensor. Dispatch: contiguous raw loop -> strided OMP
   apply -> serial apply (size mismatch / nested parallelism / no OpenMP). */
void THTensor_(clshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
return THError("clshift is not supported for torch.HalfTensor");
#endif
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
/* fast path: all three tensors contiguous, shift over raw pointers */
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT)
rp[i] = tp[i] * powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
rp[i] = tp[i] * pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) << sp[i];
#else
rp[i] = ((ureal) tp[i]) << sp[i];
#endif
}
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
/* avoid nested parallel regions; fall through to serial apply */
serial_path = 1;
} else {
#if defined(TH_REAL_IS_FLOAT)
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;);
#else
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((ureal)*t_data) << *src_data;);
#endif
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
#if defined(TH_REAL_IS_FLOAT)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;);
#else
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) << *src_data;);
#endif
}
}
/* Element-wise right shift: r_ = t >> src.
   Floating types emulate the shift as t / 2^src; byte tensors shift `real`
   directly; other integer types shift through `ureal` (logical, not
   arithmetic, shift for signed inputs). Unsupported for HalfTensor.
   Dispatch mirrors clshift: contiguous raw loop -> strided OMP apply ->
   serial apply. */
void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
return THError("crshift is not supported for torch.HalfTensor");
#endif
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
/* fast path over raw contiguous data */
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT)
rp[i] = tp[i] / powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
rp[i] = tp[i] / pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) >> sp[i];
#else
rp[i] = ((ureal) tp[i]) >> sp[i];
#endif
}
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
/* already parallel: take the serial path instead of nesting */
serial_path = 1;
} else {
#if defined(TH_REAL_IS_FLOAT)
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;);
#else
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;);
#endif
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
#if defined(TH_REAL_IS_FLOAT)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;);
#else
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;);
#endif
}
}
/* Element-wise C-style remainder: r_ = fmod(t, src) for floating types
   (result keeps the sign of the dividend, per C fmod), t % src for
   integer types. Dispatch: contiguous raw loop -> strided OMP apply ->
   serial apply (size mismatch / nested parallelism / no OpenMP). */
void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
rp[i] = fmod(tp[i], sp[i]);
#else
rp[i] = tp[i] % sp[i];
#endif
}
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig,real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
#else
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = (*t_data % *src_data););
#endif
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
#else
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*t_data % *src_data););
#endif
}
}
/* Element-wise floored-modulo remainder (result takes the sign of the
   divisor). Floating types compute t - src * floor(t / src) and yield NAN
   when the divisor is 0; integer types compute t % src and then add src
   back when modulo_wrap() says the signs disagree (no NAN exists for
   integers). Dispatch: contiguous raw loop -> strided OMP apply ->
   serial apply. */
void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
rp[i] = (sp[i] == 0)? NAN : tp[i] - sp[i] * floor(tp[i] / sp[i]);
#else
// There is no NAN for integers
rp[i] = tp[i] % sp[i];
if (modulo_wrap(rp[i], sp[i]))
rp[i] += sp[i];
#endif
}
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
#else
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data % *src_data;
if (modulo_wrap(*r__data, *src_data)) *r__data += *src_data;);
#endif
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
#else
// There is no NAN for integers
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data % *src_data;
if (modulo_wrap(*r__data, *src_data)) *r__data += *src_data;);
#endif
}
}
/* Element-wise bitwise AND: r_ = t & src. Integer tensor types only;
   float/double/half instantiations raise an error. Dispatch: contiguous
   raw loop -> strided OMP apply -> serial apply. */
void THTensor_(cbitand)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
(void)r_;
(void)t;
(void)src;
return THError("cbitand is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++) {
rp[i] = tp[i] & sp[i];
}
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data & *src_data;);
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data & *src_data;);
}
#endif
}
/* Element-wise bitwise OR: r_ = t | src. Integer tensor types only;
   float/double/half instantiations raise an error. Dispatch mirrors
   cbitand. */
void THTensor_(cbitor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
(void)r_;
(void)t;
(void)src;
return THError("cbitor is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++) {
rp[i] = tp[i] | sp[i];
}
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data | *src_data;);
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data | *src_data;);
}
#endif
}
/* Element-wise bitwise XOR: r_ = t ^ src. Integer tensor types only;
   float/double/half instantiations raise an error. Dispatch mirrors
   cbitand. */
void THTensor_(cbitxor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
(void)r_;
(void)t;
(void)src;
return THError("cbitxor is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
int64_t r_Size = THTensor_(nElement)(r_);
int64_t srcSize = THTensor_(nElement)(src);
int r_Contig = THTensor_(isContiguous)(r_);
int tContig = THTensor_(isContiguous)(t);
int srcContig = THTensor_(isContiguous)(src);
int serial_path = 0;
if (srcSize == r_Size){
if (r_Contig && tContig && srcContig) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
int64_t i;
#pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<r_Size; i++) {
rp[i] = tp[i] ^ sp[i];
}
} else {
#if _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;);
}
#else
serial_path = 1;
#endif
}
} else {
serial_path = 1;
}
if (serial_path) {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;);
}
#endif
}
/* Scalar-base power: r_[i] = value ^ t[i] (the tensor supplies the
   exponents). Contiguous fast path uses a raw OpenMP loop; otherwise the
   strided OMP apply macro runs, with a serial apply fallback when OpenMP
   is unavailable or we are already inside a parallel region. */
void THTensor_(tpow)(THTensor *r_, real value, THTensor *t)
{
THTensor_(resizeAs)(r_, t);
int64_t numel = THTensor_(nElement)(r_);
int out_contig = THTensor_(isContiguous)(r_);
int in_contig = THTensor_(isContiguous)(t);
int fallback = 0;
if (out_contig && in_contig) {
real *in_p = THTensor_(data)(t);
real *out_p = THTensor_(data)(r_);
int64_t k;
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(k)
for (k = 0; k < numel; k++)
out_p[k] = THTensor_(powOne)(value, in_p[k]);
} else {
#if _OPENMP
if (omp_in_parallel()) {
fallback = 1;
} else {
TH_TENSOR_APPLY2_OMP(numel, out_contig, in_contig, real, r_, real, t, *r__data = THTensor_(powOne)(value, *t_data););
}
#else
fallback = 1;
#endif
}
if (fallback) {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = THTensor_(powOne)(value, *t_data););
}
}
/* Fused multiply-accumulate: r_ = t + value * src1 * src2 (element-wise).
   When r_ aliases t the accumulation happens in place; otherwise t is
   first copied into r_. Uses the strided OMP apply when sizes match and
   no parallel region is active; serial apply otherwise. */
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if (r_ != t) {
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
int64_t out_n = THTensor_(nElement)(r_);
int64_t n1 = THTensor_(nElement)(src1);
int64_t n2 = THTensor_(nElement)(src2);
int out_contig = THTensor_(isContiguous)(r_);
int c1 = THTensor_(isContiguous)(src1);
int c2 = THTensor_(isContiguous)(src2);
int fallback = 0;
if ((n1 != n2) || (n1 != out_n)) {
fallback = 1;
} else {
#if _OPENMP
if (omp_in_parallel()) {
fallback = 1;
} else {
TH_TENSOR_APPLY3_OMP(out_n, out_contig, c1, c2, real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
}
#else
fallback = 1;
#endif
}
if (fallback) {
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
}
}
/* Fused divide-accumulate: r_ = t + value * src1 / src2 (element-wise).
   When r_ aliases t the accumulation happens in place; otherwise t is
   first copied into r_. Uses the strided OMP apply when sizes match and
   no parallel region is active; serial apply otherwise. */
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if (r_ != t) {
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
int64_t out_n = THTensor_(nElement)(r_);
int64_t n1 = THTensor_(nElement)(src1);
int64_t n2 = THTensor_(nElement)(src2);
int out_contig = THTensor_(isContiguous)(r_);
int c1 = THTensor_(isContiguous)(src1);
int c2 = THTensor_(isContiguous)(src2);
int fallback = 0;
if ((n1 != n2) || (n1 != out_n)) {
fallback = 1;
} else {
#if _OPENMP
if (omp_in_parallel()) {
fallback = 1;
} else {
TH_TENSOR_APPLY3_OMP(out_n, out_contig, c1, c2, real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}
#else
fallback = 1;
#endif
}
if (fallback) {
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}
}
/* Matrix-vector multiply-accumulate: r_ = beta * t + alpha * mat @ vec,
   dispatched to BLAS gemv. mat must be 2D, vec and t 1D, with matching
   sizes. The three branches pick the gemv transpose flag based on which
   stride of mat is 1 (column-major vs row-major layout); if neither layout
   is gemv-compatible, a contiguous clone of mat is made first. */
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
if( (mat->nDimension != 2) || (vec->nDimension != 1) )
THError("matrix and vector expected, got %dD, %dD",
mat->nDimension, vec->nDimension);
if( mat->size[1] != vec->size[0] ) {
THDescBuff bm = THTensor_(sizeDesc)(mat);
THDescBuff bv = THTensor_(sizeDesc)(vec);
THError("size mismatch, %s, %s", bm.str, bv.str);
}
if(t->nDimension != 1)
THError("vector expected, got t: %dD", t->nDimension);
if(t->size[0] != mat->size[0]) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bm = THTensor_(sizeDesc)(mat);
THError("size mismatch, t: %s, mat: %s", bt.str, bm.str);
}
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
// n == 1 || lda >= max(1, m)
#define LDA_COND(M, N, LDA) ((N) == 1 || (LDA) >= THMax(1, (M)))
if(mat->stride[0] == 1 && LDA_COND(mat->size[0], mat->size[1], mat->stride[1]))
{
/* mat is column-major compatible: no transpose needed */
THBlas_(gemv)('n', mat->size[0], mat->size[1],
alpha, THTensor_(data)(mat), mat->stride[1],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
else if(mat->stride[1] == 1 && LDA_COND(mat->size[1], mat->size[0], mat->stride[0]))
{
/* mat is row-major compatible: use transposed gemv */
THBlas_(gemv)('t', mat->size[1], mat->size[0],
alpha, THTensor_(data)(mat), mat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
else
{
/* neither layout works for gemv: operate on a contiguous copy */
THTensor *cmat = THTensor_(newContiguous)(mat);
THBlas_(gemv)('t', mat->size[1], mat->size[0],
alpha, THTensor_(data)(cmat), cmat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
THTensor_(free)(cmat);
}
#undef LDA_COND
}
/* Pairwise squared L2 distance: r_[i][j] = gain * ||m1[i] - m2[j]||^2,
   where m1/m2 are flattened to (N1, dim) and (N2, dim). The inputs are
   replaced by contiguous copies (freed at the end); the outer loop over
   rows of m1 is parallelized with OpenMP. */
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
int64_t N1 = m1->size[0];
int64_t N2 = m2->size[0];
int64_t dim;
real *m1_p;
real *m2_p;
real *r_p;
int64_t i;
THTensor_(resize2d)(r_, N1, N2);
/* work on contiguous copies; originals are untouched */
m1 = THTensor_(newContiguous)(m1);
m2 = THTensor_(newContiguous)(m2);
/* flatten all trailing dimensions into one inner vector dimension */
THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);
dim = m1->size[1];
THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");
m1_p = THTensor_(data)(m1);
m2_p = THTensor_(data)(m2);
r_p = THTensor_(data)(r_);
#pragma omp parallel for private(i)
for (i=0; i<N1; i++) {
int64_t j,k;
for (j=0; j<N2; j++) {
real sum = 0;
for (k=0; k<dim; k++) {
real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
sum += term*term;
}
r_p[ i*N2 + j ] = gain * sum;
}
}
THTensor_(free)(m1);
THTensor_(free)(m2);
}
/* Matrix-matrix multiply-accumulate: r_ = beta * t + alpha * m1 @ m2,
   dispatched to BLAS gemm. The bulk of the function picks transpose flags
   and leading dimensions so gemm can consume each operand in place; only
   operands whose strides cannot satisfy gemm are cloned contiguously.
   When r_ itself is row-major, the problem is computed as the transposed
   product (m1 and m2 are swapped and 't' is recorded for r_). */
void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
char transpose_r, transpose_m1, transpose_m2;
THTensor *r__, *m1_, *m2_;
int free_m1 = 0;
int free_m2 = 0;
if( (m1->nDimension != 2) || (m2->nDimension != 2))
THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension);
if(m1->size[1] != m2->size[0]) {
THDescBuff bm1 = THTensor_(sizeDesc)(m1);
THDescBuff bm2 = THTensor_(sizeDesc)(m2);
THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
}
if( t->nDimension != 2 )
THError("matrix expected, got %dD tensor for t", t->nDimension);
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bm1 = THTensor_(sizeDesc)(m1);
THDescBuff bm2 = THTensor_(sizeDesc)(m2);
THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
}
if(t != r_)
{
THTensor_(resizeAs)(r_, t);
/* beta == 0 means t's contents never contribute, so skip the copy */
if (beta != 0.0) {
THTensor_(copy)(r_, t);
}
}
// n == 1 || ldc >= max(1, m)
#define LDC_COND(M, N, LDC) ((N) == 1 || (LDC) >= THMax(1, M))
/* r_ */
if(r_->stride[0] == 1 &&
LDC_COND(r_->size[0], r_->size[1], r_->stride[1]))
{
/* r_ already column-major: gemm writes it directly */
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride[1] == 1 &&
LDC_COND(r_->size[1], r_->size[0], r_->stride[0]))
{
/* r_ row-major: compute (m2^T @ m1^T)^T by swapping the operands */
THTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
transpose_r = 'n';
// make r__ FORTRAN contiguous
THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1);
r__ = THTensor_(newClone)(transp_r_);
THTensor_(free)(transp_r_);
THTensor_(transpose)(r__, NULL, 0, 1);
}
#undef LDC_COND
int64_t m = r__->size[(transpose_r == 'n' ? 0 : 1)];
int64_t n = r__->size[(transpose_r == 'n' ? 1 : 0)];
int64_t k = m1->size[(transpose_r == 'n' ? 1 : 0)];
int64_t ldr__ = r__->stride[(transpose_r == 'n' ? 1 : 0)];
/* m1 */
/* Need ldm1_ >= max(1, (transpose_m1 == 'n' ? m : k)) */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m1->stride[(transpose_r == 'n' ? 1 : 0)] >= THMax(1, m))
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m1->stride[(transpose_r == 'n' ? 0 : 1)] >= THMax(1, k))
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
/* strides unusable: clone contiguously and flip the transpose flag */
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THTensor_(newContiguous)(m1);
free_m1 = 1;
}
/* m2 */
/* Need ldm2_ >= max(1, (transpose_m2 == 'n' ? k : n)) */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m2->stride[(transpose_r == 'n' ? 1 : 0)] >= THMax(1, k))
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m2->stride[(transpose_r == 'n' ? 0 : 1)] >= THMax(1, n))
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THTensor_(newContiguous)(m2);
free_m2 = 1;
}
int64_t ldm1_ = (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]);
int64_t ldm2_ = (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]);
/* NOTE(review): serializes concurrent gemm calls — presumably for a BLAS
   that is not thread-safe; confirm before removing */
#pragma omp critical(blasgemm)
/* do the operation */
THBlas_(gemm)(transpose_m1,
transpose_m2,
m,
n,
k,
alpha,
THTensor_(data)(m1_),
ldm1_,
THTensor_(data)(m2_),
ldm2_,
beta,
THTensor_(data)(r__),
ldr__);
/* free intermediate variables */
if(free_m1)
THTensor_(free)(m1_);
if(free_m2)
THTensor_(free)(m2_);
if(r__ != r_)
THTensor_(freeCopyTo)(r__, r_);
}
/* Outer-product update: r_ = beta * t + alpha * vec1 (x) vec2, dispatched
   to BLAS ger. beta is applied up front by zeroing or scaling r_ (ger only
   accumulates). The branches pick which vector is "rows" based on whether
   r_ is column-major (stride[0]==1) or row-major (stride[1]==1); otherwise
   a clone of r_ is updated and copied back. */
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
THError("vector and vector expected, got %dD, %dD tensors",
vec1->nDimension, vec2->nDimension);
if(t->nDimension != 2)
THError("expected matrix, got %dD tensor for t", t->nDimension);
if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bv1 = THTensor_(sizeDesc)(vec1);
THDescBuff bv2 = THTensor_(sizeDesc)(vec2);
THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str);
}
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
/* apply beta before ger, which only does r += alpha * x y^T */
if(beta == 0) {
THTensor_(zero)(r_);
}
else if(beta != 1)
THTensor_(mul)(r_, r_, beta);
// n == 1 || lda >= max(1, m)
#define LDA_COND(M, N, LDA) ((N) == 1 || (LDA) >= THMax(1, (M)))
if(r_->stride[0] == 1 && LDA_COND(vec1->size[0], vec2->size[0], r_->stride[1]))
{
THBlas_(ger)(vec1->size[0], vec2->size[0],
alpha, THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(r_), r_->stride[1]);
}
else if(r_->stride[1] == 1 && LDA_COND(vec2->size[0], vec1->size[0], r_->stride[0]))
{
/* row-major r_: compute the transposed outer product */
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(r_), r_->stride[0]);
}
else
{
/* neither layout fits ger: update a clone and copy back */
THTensor *cr = THTensor_(newClone)(r_);
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(cr), cr->stride[0]);
THTensor_(freeCopyTo)(cr, r_);
}
#undef LDA_COND
}
/* Batched accumulate into one matrix:
   result = beta * t + alpha * sum_b batch1[b] @ batch2[b].
   batch1 is (B, dim1, K), batch2 is (B, K, dim2), t/result are
   (dim1, dim2). Each batch slice is reduced through addmm; beta is forced
   to 1 after the first iteration so later batches accumulate instead of
   rescaling the output. */
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
int64_t batch;
THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor");
THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor");
THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
"equal number of batches expected, got %d, %d",
THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
"wrong matrix size, batch1: %dx%d, batch2: %dx%d",
THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2),
THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2));
int64_t dim1 = THTensor_(size)(batch1, 1);
int64_t dim2 = THTensor_(size)(batch2, 2);
THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size");
if (t != result) {
THTensor_(resizeAs)(result, t);
if (beta != 0.0) {
THTensor_(copy)(result, t);
}
}
/* reusable views onto each batch slice */
THTensor *matrix1 = THTensor_(new)();
THTensor *matrix2 = THTensor_(new)();
for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
THTensor_(select)(matrix1, batch1, 0, batch);
THTensor_(select)(matrix2, batch2, 0, batch);
THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2);
beta = 1; // accumulate output once
}
THTensor_(free)(matrix1);
THTensor_(free)(matrix2);
}
/* Batched matrix multiply with per-batch output:
   result[b] = beta * t[b] + alpha * batch1[b] @ batch2[b] for every b.
   batch1 is (B, dim1, K), batch2 is (B, K, dim2), t/result are
   (B, dim1, dim2). Unlike addbmm, each batch writes its own output slice,
   so beta applies to every batch. */
void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
int64_t batch;
THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1));
THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2));
THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
"equal number of batches expected, got %d, %d",
THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
"wrong matrix size, batch1: %dx%d, batch2: %dx%d",
THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2),
THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2));
int64_t bs = THTensor_(size)(batch1, 0);
int64_t dim1 = THTensor_(size)(batch1, 1);
int64_t dim2 = THTensor_(size)(batch2, 2);
THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size");
if (t != result) {
THTensor_(resizeAs)(result, t);
if (beta != 0.0) {
THTensor_(copy)(result, t);
}
}
/* reusable views onto the batch slices of each operand and the output */
THTensor *matrix1 = THTensor_(new)();
THTensor *matrix2 = THTensor_(new)();
THTensor *result_matrix = THTensor_(new)();
for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
THTensor_(select)(matrix1, batch1, 0, batch);
THTensor_(select)(matrix2, batch2, 0, batch);
THTensor_(select)(result_matrix, result, 0, batch);
THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2);
}
THTensor_(free)(matrix1);
THTensor_(free)(matrix2);
THTensor_(free)(result_matrix);
}
/* Total number of elements in t; thin alias for nElement. */
ptrdiff_t THTensor_(numel)(THTensor *t)
{
return THTensor_(nElement)(t);
}
// Helper function to be used in a reduction operation.
// Due to resize semantics of outputs, if the specified output tensor r_ has
// same size as the output of the reduction operation, then any noncontiguities
// in r_ should be preserved.
// The reduction operation, however, needs to act on r_ with an extra dimension
// (the reduced dimension), so this function "resizes" r_ and preserves its
// noncontiguities if necessary.
void THTensor_(preserveReduceDimSemantics)(
THTensor *r_, int in_dims, int reduce_dimension, int keepdim) {
/* Nothing to do when there is no output tensor or the reduced
   dimension is being kept anyway. */
if (!r_ || keepdim)
return;
/* r_ looks like a previous keepdim=0 result: re-insert the reduced
   dimension so the reduction can act on it, preserving any existing
   noncontiguities of r_. */
int r_dims = THTensor_(nDimension)(r_);
if (r_dims == in_dims - 1 && r_dims != 0)
THTensor_(unsqueeze1d)(r_, r_, reduce_dimension);
}
/* Max reduction along `dimension`: writes the maxima into values_ and
   their (0-based) positions into indices_. The comparison is written as
   !(value <= theMax) so a NaN encountered in the data wins immediately
   (and th_isnan_break stops the scan). Two implementations are chosen by
   data locality: a dim-apply scan when the reduced dimension is unit
   stride, otherwise a strided pass seeded with slice 0. */
void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension + TH_INDEX_BASE);
int in_dims = THTensor_(nDimension)(t);
THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
/* size the outputs like t with the reduced dimension collapsed to 1 */
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
// two implementations optimized for data locality
if (t->stride[dimension] == 1) {
real theMax;
real value;
int64_t theIndex;
int64_t i;
TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
theMax = t_data[0];
theIndex = 0;
for(i = 0; i < t_size; i++)
{
value = t_data[i*t_stride];
/* This is not the same as value>theMax in the case of NaNs */
if(!(value <= theMax))
{
theIndex = i;
theMax = value;
th_isnan_break(value)
}
}
*indices__data = theIndex;
*values__data = theMax;);
} else {
/* seed values_ with slice 0 of t along the reduced dimension */
if (THTensor_(nDimension)(t) > 1) {
THTensor *t0 = THTensor_(newSelect)(t, dimension, 0);
THTensor_(copy)(values_, t0);
THTensor_(free)(t0);
} else {
THTensor_(fill)(values_, THTensor_(get1d)(t, 0));
}
THLongTensor_zero(indices_);
if(t->size[dimension] == 1) {
if (!keepdim) {
THTensor_(squeeze1d)(values_, values_, dimension);
THLongTensor_squeeze1d(indices_, indices_, dimension);
}
return;
}
/* broadcast the outputs over the reduced dimension via stride-0 views */
THTensor *tempValues_ = THTensor_(newWithTensor)(values_);
// tempValues_.expand_as(t)
tempValues_->size[dimension] = t->size[dimension];
tempValues_->stride[dimension] = 0;
THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_);
// tempIndices_.expand_as(t)
tempIndices_->size[dimension] = t->size[dimension];
tempIndices_->stride[dimension] = 0;
TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension,
if(!(*t_data <= *tempValues__data) && !th_isnan(*tempValues__data)) {
*tempValues__data = *t_data;
*tempIndices__data = *tempIndices__dimOffset;
});
THTensor_(free)(tempValues_);
THLongTensor_free(tempIndices_);
}
if (!keepdim) {
THTensor_(squeeze1d)(values_, values_, dimension);
THLongTensor_squeeze1d(indices_, indices_, dimension);
}
}
/* Min reduction along `dimension`: writes the minima into values_ and
   their (0-based) positions into indices_. Structure mirrors
   THTensor_(max); note the local `theMax` actually tracks the running
   MINIMUM here (the name is carried over from max). The comparison
   !(value >= theMax) lets a NaN win immediately. */
void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension + TH_INDEX_BASE);
int in_dims = THTensor_(nDimension)(t);
THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
/* size the outputs like t with the reduced dimension collapsed to 1 */
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
// two implementations optimized for data locality
if (t->stride[dimension] == 1) {
real theMax;
real value;
int64_t theIndex;
int64_t i;
TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
theMax = t_data[0];
theIndex = 0;
for(i = 0; i < t_size; i++)
{
value = t_data[i*t_stride];
/* This is not the same as value>theMax in the case of NaNs */
if(!(value >= theMax))
{
theIndex = i;
theMax = value;
th_isnan_break(value)
}
}
*indices__data = theIndex;
*values__data = theMax;);
} else {
/* seed values_ with slice 0 of t along the reduced dimension */
if (THTensor_(nDimension)(t) > 1) {
THTensor *t0 = THTensor_(newSelect)(t, dimension, 0);
THTensor_(copy)(values_, t0);
THTensor_(free)(t0);
} else {
THTensor_(fill)(values_, THTensor_(get1d)(t, 0));
}
THLongTensor_zero(indices_);
if(t->size[dimension] == 1) {
if (!keepdim) {
THTensor_(squeeze1d)(values_, values_, dimension);
THLongTensor_squeeze1d(indices_, indices_, dimension);
}
return;
}
/* broadcast the outputs over the reduced dimension via stride-0 views */
THTensor *tempValues_ = THTensor_(newWithTensor)(values_);
// tempValues_.expand_as(t)
tempValues_->size[dimension] = t->size[dimension];
tempValues_->stride[dimension] = 0;
THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_);
// tempIndices_.expand_as(t)
tempIndices_->size[dimension] = t->size[dimension];
tempIndices_->stride[dimension] = 0;
TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension,
if(!(*t_data >= *tempValues__data) && !th_isnan(*tempValues__data)) {
*tempValues__data = *t_data;
*tempIndices__data = *tempIndices__dimOffset;
});
THTensor_(free)(tempValues_);
THLongTensor_free(tempIndices_);
}
if (!keepdim) {
THTensor_(squeeze1d)(values_, values_, dimension);
THLongTensor_squeeze1d(indices_, indices_, dimension);
}
}
/* Sum reduction along `dimension` into r_. The OpenMP path parallelizes
   over output elements: each output index is decomposed into per-dimension
   coordinates via r_'s strides (valid because that path requires r_
   contiguous) and mapped to the matching base offset in t. Serial
   fallbacks: a dim-apply accumulation when the reduced dimension is unit
   stride, otherwise a stride-0 "expand" accumulation. */
void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
dimension + TH_INDEX_BASE);
THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
/* size r_ like t with the reduced dimension collapsed to 1 */
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
int serial_path = 0;
#ifdef _OPENMP
int inOMP = omp_in_parallel();
if (inOMP) {
serial_path = 1;
} else {
int r_Contig = THTensor_(isContiguous)(r_);
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
/* tp == rp would mean reducing in place; that needs the serial path */
if(r_Contig && (tp != rp)){
ptrdiff_t iter = 0;
ptrdiff_t r_Size = THTensor_(nElement)(r_);
int r_Dim = r_->nDimension;
#pragma omp parallel for if ( r_Size > TH_OMP_OVERHEAD_THRESHOLD)
for (iter = 0; iter < r_Size; iter++) {
int j;
int64_t quot;
int64_t rem = iter;
ptrdiff_t tBasicIndex = 0;
/* decode the linear output index into coordinates and map to t */
for(j = 0; j < r_Dim; ++j) {
if(j != dimension){
quot = rem/r_->stride[j];
rem = rem%r_->stride[j];
tBasicIndex += quot*t->stride[j];
}
}
real *t_data = tp+tBasicIndex;
real *r__data = rp+iter;
*r__data = 0;
for(j=0; j < t->size[dimension]; ++j) {
*r__data += *(t_data + j*t->stride[dimension]);
}
}
} else {
serial_path = 1;
}
}
#else
serial_path = 1;
#endif
if (serial_path) {
// two implementations optimized for data locality
if (t->stride[dimension] == 1) {
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
int64_t i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride];
*r__data = (real)sum;);
} else {
THTensor_(zero)(r_);
THTensor *temp_ = THTensor_(newWithTensor)(r_);
// r_.expand_as(t)
temp_->size[dimension] = t->size[dimension];
temp_->stride[dimension] = 0;
TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data + *t_data;);
THTensor_(free)(temp_);
}
}
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
}
/* r_ = product of t along `dimension`.  Mirrors THTensor_(sum) exactly,
 * with 1 as the identity element and multiplication as the reduction. */
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);
  THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
  /* result shape: same as t, but size 1 along the reduced dimension */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);
  int serial_path = 0;
#ifdef _OPENMP
  int inOMP = omp_in_parallel();
  if (inOMP) {
    /* already inside a parallel region: avoid nested parallelism */
    serial_path = 1;
  } else {
    int r_Contig = THTensor_(isContiguous)(r_);
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    /* parallel path requires a contiguous output that does not alias t */
    if(r_Contig && (tp != rp)){
      ptrdiff_t iter = 0;
      ptrdiff_t r_Size = THTensor_(nElement)(r_);
      int r_Dim = r_->nDimension;
      #pragma omp parallel for if ( r_Size > TH_OMP_OVERHEAD_THRESHOLD)
      for (iter = 0; iter < r_Size; iter++) {
        int j;
        int64_t quot;
        int64_t rem = iter;
        ptrdiff_t tBasicIndex = 0;
        /* decode linear output index via r_'s strides and remap onto t */
        for(j = 0; j < r_Dim; ++j) {
          if(j != dimension){
            quot = rem/r_->stride[j];
            rem = rem%r_->stride[j];
            tBasicIndex += quot*t->stride[j];
          }
        }
        real *t_data = tp+tBasicIndex;
        real *r__data = rp+iter;
        *r__data = 1;
        for(j=0; j < t->size[dimension]; ++j) {
          *r__data *= *(t_data + j*t->stride[dimension]);
        }
      }
    } else {
      serial_path = 1;
    }
  }
#else
  serial_path = 1;
#endif
  if(serial_path) {
    // two implementations optimized for data locality
    if (t->stride[dimension] == 1) {
      TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                           accreal prod = 1;
                           int64_t i;
                           for(i = 0; i < t_size; i++)
                             prod *= t_data[i*t_stride];
                           *r__data = (real)prod;);
    } else {
      /* accumulate through a zero-stride broadcast view of r_ */
      THTensor_(fill)(r_, 1);
      THTensor *temp_ = THTensor_(newWithTensor)(r_);
      // r_.expand_as(t)
      temp_->size[dimension] = t->size[dimension];
      temp_->stride[dimension] = 0;
      TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data * *t_data;);
      THTensor_(free)(temp_);
    }
  }
  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* r_ = inclusive running sum of t along `dimension`; r_ has t's shape.
 * Accumulation is done in accreal to reduce rounding/overflow in the scan. */
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);
  THTensor_(resizeAs)(r_, t);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumsum = 0;
                       int64_t i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumsum += t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumsum;
                       });
}
/* r_ = inclusive running product of t along `dimension`; r_ has t's shape.
 * Accumulation is done in accreal, mirroring cumsum. */
void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);
  THTensor_(resizeAs)(r_, t);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumprod = 1;
                       int64_t i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumprod *= t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumprod;
                       });
}
/* r_ = elementwise sign of t: 1 for positive, -1 for negative, 0 otherwise.
 * For the unsigned byte type no value is negative and -1 is not
 * representable, so the result is simply 1 for nonzero-positive, else 0. */
void THTensor_(sign)(THTensor *r_, THTensor *t)
{
  THTensor_(resizeAs)(r_, t);

#if defined (TH_REAL_IS_BYTE)
  TH_TENSOR_APPLY2(real, r_, real, t,
    if (*t_data > 0) *r__data = 1;
    else *r__data = 0;);
#else
  TH_TENSOR_APPLY2(real, r_, real, t,
    if (*t_data > 0) *r__data = 1;
    else if (*t_data < 0) *r__data = -1;
    else *r__data = 0;);
#endif
}
/* Returns the trace of a 2D tensor: the sum of its main-diagonal
 * elements, accumulated in accreal.  For a non-square matrix the
 * diagonal length is min(rows, cols). */
accreal THTensor_(trace)(THTensor *t)
{
  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  real *data = THTensor_(data)(t);
  const int64_t stride0 = THTensor_(stride)(t, 0);
  const int64_t stride1 = THTensor_(stride)(t, 1);
  const int64_t diag_len = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));

  accreal total = 0;
  /* element (d, d) lives at offset d*(stride0 + stride1) */
  for (int64_t d = 0; d < diag_len; d++)
    total += data[d * (stride0 + stride1)];

  return total;
}
/* r_ = cross product of a and b along `dimension`, which must have size 3.
 * a and b must have identical shapes.  A negative `dimension` means
 * "the first dimension of size 3", searched in index order. */
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
  int i;

  if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
    THError("inconsistent tensor dimension %dD, %dD",
        THTensor_(nDimension)(a), THTensor_(nDimension)(b));

  for(i = 0; i < THTensor_(nDimension)(a); i++)
  {
    if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THDescBuff bb = THTensor_(sizeDesc)(b);
      THError("inconsistent tensor sizes %s, %s", ba.str, bb.str);
    }
  }

  /* auto-detect the cross-product dimension when the caller passed < 0 */
  if(dimension < 0)
  {
    for(i = 0; i < THTensor_(nDimension)(a); i++)
    {
      if(THTensor_(size)(a, i) == 3)
      {
        dimension = i;
        break;
      }
    }
    if(dimension < 0) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THError("no dimension of size 3 in a: %s", ba.str);
    }
  }

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range",
      dimension + TH_INDEX_BASE);
  THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, a);

  /* standard 3-component cross product for each slice along `dimension` */
  TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
                       TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                       r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
                       r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
                       r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}
/* r = elementwise maximum of t and src (shapes must be apply-compatible). */
void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data > *src_data ? *t_data : *src_data;);
}
/* r = elementwise minimum of t and src (shapes must be apply-compatible). */
void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data < *src_data ? *t_data : *src_data;);
}
/* r = elementwise maximum of t and the scalar `value` (clamp from below). */
void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data > value ? *t_data : value;);
}
/* r = elementwise minimum of t and the scalar `value` (clamp from above). */
void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data < value ? *t_data : value;);
}
/* Resizes r_ to `size` and fills it with zeros. */
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(zero)(r_);
}
/* Resizes r_ to input's shape and fills it with zeros. */
void THTensor_(zerosLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(zero)(r_);
}
/* Resizes r_ to input's shape and fills it with ones. */
void THTensor_(onesLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(fill)(r_, 1);
}
/* Resizes r_ to `size` and fills it with ones. */
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(fill)(r_, 1);
}
/* If t is a vector: r_ becomes a square matrix with t on the k-th diagonal
 * (k > 0 above the main diagonal, k < 0 below).
 * If t is a matrix: r_ becomes a vector holding t's k-th diagonal. */
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
  THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");

  if(THTensor_(nDimension)(t) == 1)
  {
    real *t_data = THTensor_(data)(t);
    int64_t t_stride_0 = THTensor_(stride)(t, 0);
    int64_t t_size = THTensor_(size)(t, 0);
    /* output is square, enlarged by |k| to fit the shifted diagonal */
    int64_t sz = t_size + (k >= 0 ? k : -k);
    real *r__data;
    int64_t r__stride_0;
    int64_t r__stride_1;
    int64_t i;

    THTensor_(resize2d)(r_, sz, sz);
    THTensor_(zero)(r_);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);
    r__stride_1 = THTensor_(stride)(r_, 1);
    /* shift the write origin to the start of the k-th diagonal */
    r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);

    for(i = 0; i < t_size; i++)
      r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
  }
  else
  {
    real *t_data = THTensor_(data)(t);
    int64_t t_stride_0 = THTensor_(stride)(t, 0);
    int64_t t_stride_1 = THTensor_(stride)(t, 1);
    int64_t sz;
    real *r__data;
    int64_t r__stride_0;
    int64_t i;

    /* length of the k-th diagonal of the matrix */
    if(k >= 0)
      sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k);
    else
      sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1));
    THTensor_(resize1d)(r_, sz);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);
    /* shift the read origin to the start of the k-th diagonal */
    t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);
    for(i = 0; i < sz; i++)
      r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
  }
}
/* Resizes r_ to an n-by-m matrix (m defaults to n when m <= 0) filled
 * with zeros, then writes 1 on the main diagonal.
 * Fixes vs. previous version: removed a redundant `i = 0;` dead store
 * (the for loop re-initializes i), and hoisted the diagonal stride out
 * of the loop using the THTensor_(stride) accessor for consistency with
 * the rest of this file instead of poking r_->stride directly. */
void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m)
{
  real *r__data;
  int64_t i, sz, diag_stride;

  THArgCheck(n > 0, 1, "invalid argument");

  if(m <= 0)
    m = n;

  THTensor_(resize2d)(r_, n, m);
  THTensor_(zero)(r_);

  r__data = THTensor_(data)(r_);
  sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
  /* element (i, i) lives at offset i*(stride0 + stride1) */
  diag_stride = THTensor_(stride)(r_, 0) + THTensor_(stride)(r_, 1);
  for(i = 0; i < sz; i++)
    r__data[i*diag_stride] = 1;
}
/* Fills r_ with the closed-interval sequence xmin, xmin+step, ..., up to
 * and including xmax (when reachable).  step may be negative but not zero,
 * and its sign must be consistent with the direction xmin -> xmax.
 * Fix: the THArgCheck message read "upper bound and larger bound"; it now
 * correctly says "upper bound and lower bound". */
void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step)
{
  ptrdiff_t size;
  real i = 0;  /* running index, advanced by the apply macro below */

  THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
  THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
              , 2, "upper bound and lower bound inconsistent with step sign");

  /* inclusive count of elements in [xmin, xmax] stepping by `step` */
  size = (ptrdiff_t) (((xmax - xmin) / step) + 1);

  if (THTensor_(nElement)(r_) != size) {
    THTensor_(resize1d)(r_, size);
  }

  TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
/* Fills r_ with the half-open sequence xmin, xmin+step, ... strictly below
 * xmax (ceil-based count), mirroring Python's range/NumPy's arange.
 * Fix: the THArgCheck message read "upper bound and larger bound"; it now
 * correctly says "upper bound and lower bound". */
void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step) {
  ptrdiff_t size;
  real i = 0;  /* running index, advanced by the apply macro below */

  THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
  THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
              , 2, "upper bound and lower bound inconsistent with step sign");

  /* exclusive count: number of steps needed to reach (but not include) xmax */
  size = (ptrdiff_t) ceil((double)(xmax - xmin) / step);

  if (THTensor_(nElement)(r_) != size) {
    THTensor_(resize1d)(r_, size);
  }

  TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
/* Fills r_ (resized to length n) with a uniformly random permutation of
 * 0..n-1 using the given generator: first writes the identity permutation,
 * then applies a Fisher-Yates shuffle. */
void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, int64_t n)
{
  THArgCheck(n > 0, 1, "must be strictly positive");

  THTensor_(resize1d)(r_, n);
  real *data = THTensor_(data)(r_);
  const int64_t stride = THTensor_(stride)(r_, 0);

  /* identity permutation 0, 1, ..., n-1 */
  for (int64_t idx = 0; idx < n; idx++)
    data[idx*stride] = (real)(idx);

  /* Fisher-Yates: swap each slot with a uniformly chosen later slot */
  for (int64_t idx = 0; idx < n-1; idx++)
  {
    int64_t pick = THRandom_random(_generator) % (n-idx);
    real held = data[idx*stride];
    data[idx*stride] = data[(pick+idx)*stride];
    data[(pick+idx)*stride] = held;
  }
}
/* r_ = copy of t's elements laid out with the new `size` (element count
 * compatibility is enforced by the copy). */
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(copy)(r_, t);
}
/* I cut and pasted (slightly adapted) the quicksort code from
Sedgewick's 1978 "Implementing Quicksort Programs" article
http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf
It is the state of the art existing implementation. The macros
are here to make as close a match as possible to the pseudocode of
Program 2 p.851
Note that other partition schemes exist, and are typically presented
in textbooks, but they are less efficient. See e.g.
http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto
Julien, November 12th 2013
*/
/* Depth of the explicit quicksort recursion stack. */
#define MAX_LEVELS 300
#define M_SMALL 10 /* Limit for small subfiles */

/* Strided accessors into the value and index arrays being sorted. */
#define ARR(III) arr[(III)*stride]
#define IDX(III) idx[(III)*stride]

/* Swap helpers; `swap` and `rswap` are locals declared by each sorter. */
#define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap
#define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap

/* Swap values only, or values together with their companion indices. */
#define ARR_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ));

#define BOTH_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ)); \
  LONG_SWAP(IDX(III), IDX(JJJ))
/* In-place ascending sort of `elements` strided values in `arr`, carrying
 * `idx` along with every swap.  Sedgewick-style hybrid: explicit-stack
 * quicksort with median-of-three pivots down to M_SMALL-sized subfiles,
 * then a single insertion-sort pass to finish. */
static void THTensor_(quicksortascend)(real *arr, int64_t *idx, int64_t elements, int64_t stride)
{
  int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  /* arrays at or below M_SMALL go straight to the insertion sort */
  done = elements-1 <= M_SMALL;

  while(!done) {
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

    /* Hoare-style partition around piv = ARR(L) */
    i = L+1; j = R; piv = ARR(L); pid = IDX(L);
    do {
      do { i = i+1; } while(ARR(i) < piv);
      do { j = j-1; } while(ARR(j) > piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */

  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) > ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) < piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}
/* In-place descending sort of `elements` strided values in `arr`, carrying
 * `idx` along.  Identical structure to quicksortascend with every
 * comparison reversed. */
static void THTensor_(quicksortdescend)(real *arr, int64_t *idx, int64_t elements, int64_t stride)
{
  int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  /* arrays at or below M_SMALL go straight to the insertion sort */
  done = elements-1 <= M_SMALL;

  while(!done) {
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); }

    /* Hoare-style partition around piv = ARR(L), reversed comparisons */
    i = L+1; j = R; piv = ARR(L); pid = IDX(L);
    do {
      do { i = i+1; } while(ARR(i) > piv);
      do { j = j-1; } while(ARR(j) < piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */

  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) < ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) > piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}
#undef MAX_LEVELS
#undef M_SMALL
/* Sorts t along `dimension`; rt_ receives the sorted values and ri_ the
 * permutation indices (0-based within each slice).  Sort order is chosen
 * by `descendingOrder`.  t itself is not modified: it is copied into rt_
 * and sorted there slice by slice. */
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(rt_, t);
  THTensor_(copy)(rt_, t);

  {
    THLongStorage *size = THTensor_(newSizeOf)(t);
    THLongTensor_resize(ri_, size, NULL);
    THLongStorage_free(size);
  }

  if(descendingOrder)
  {
    /* seed each index slice with 0..n-1, then sort values+indices together */
    TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension,
                         int64_t i;
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
  }
  else
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension,
                         int64_t i;
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
  }
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm.
This version does not produce indices along with values. */
/* Partially reorders `arr` (strided, `elements` long) in place so that the
 * element at rank k ends up at position k, with smaller elements before it
 * and larger ones after.  Values-only variant: no index array is carried. */
static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, int64_t stride)
{
  int64_t P, L, R, i, j;
  real rswap, piv;
  L = 0;
  R = elements-1;

  do {
    if (R <= L) /* One element only */
      return;

    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        ARR_SWAP(L, R);
      }
      return;
    }

    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    ARR_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { ARR_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { ARR_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { ARR_SWAP(L+1, L); }

    /* partition around piv = ARR(L) */
    i = L+1;
    j = R;
    piv = ARR(L);
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
        break;
      ARR_SWAP(i, j);
    } while(1);
    ARR_SWAP(L, j);

    /* Re-set active partition: keep only the side containing rank k */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm. */
/* Same partial reordering as quickselectnoidx, but also permutes the
 * companion index array `idx` with every value swap, so idx[k] ends up
 * holding the original position of the rank-k value. */
static void THTensor_(quickselect)(real *arr, int64_t *idx, int64_t k, int64_t elements, int64_t stride)
{
  int64_t P, L, R, i, j, swap;
  real rswap, piv;
  L = 0;
  R = elements-1;

  do {
    if (R <= L) /* One element only */
      return;

    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        BOTH_SWAP(L, R);
      }
      return;
    }

    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

    /* partition around piv = ARR(L) */
    i = L+1;
    j = R;
    piv = ARR(L);
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Re-set active partition: keep only the side containing rank k */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}
#undef ARR
#undef IDX
#undef LONG_SWAP
#undef REAL_SWAP
#undef BOTH_SWAP
/* For each slice of t along `dimension`, writes the most frequent value
 * into values_ and one original index of that value into indices_.
 * Each slice is copied into a scratch buffer, sorted ascending together
 * with its original indices, then scanned for the longest run of equal
 * values.  Ties resolve to the last run scanned with a strictly greater
 * count (earlier runs win on equal counts). */
void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  int64_t *tempi__data;
  int64_t t_size_dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");

  int in_dims = THTensor_(nDimension)(t);
  THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
  THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  t_size_dim = THTensor_(size)(t, dimension);

  /* scratch buffers reused for every slice */
  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);

  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);

  TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                       TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                       int64_t i;
                       real mode = 0;
                       int64_t modei = 0;
                       int64_t temp_freq = 0;
                       int64_t max_freq = 0;
                       for(i = 0; i < t_size_dim; i++)
                          temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                          tempi__data[i] = i;
                       THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1);
                       /* scan runs of equal sorted values; track the longest */
                       for(i = 0; i < t_size_dim; i++)
                       {
                          temp_freq++;
                          if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1]))
                          {
                              if (temp_freq > max_freq)
                              {
                                 mode = temp__data[i];
                                 modei = tempi__data[i];
                                 max_freq = temp_freq;
                              }
                              temp_freq = 0;
                          }
                       }
                       *values__data = mode;
                       *indices__data = modei;);

  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* For each slice of t along `dimension`, writes the k-th smallest value
 * (k is 1-based) into values_ and its original position into indices_.
 * Each slice is copied into scratch buffers and partially ordered with
 * quickselect, so t is never modified. */
void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, int64_t k, int dimension, int keepdim)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  int64_t *tempi__data;
  int64_t t_size_dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
  THArgCheck(k > 0 && k <= t->size[dimension], 2, "selected index out of range");

  int in_dims = THTensor_(nDimension)(t);
  THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
  THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  t_size_dim = THTensor_(size)(t, dimension);

  /* scratch buffers reused for every slice */
  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);

  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);

  TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                       TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                       int64_t i;
                       for(i = 0; i < t_size_dim; i++)
                          temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                          tempi__data[i] = i;
                       /* rank k-1 (0-based) lands at position k-1 */
                       THTensor_(quickselect)(temp__data, tempi__data, k - 1, t_size_dim, 1);
                       *values__data = temp__data[k-1];
                       *indices__data = tempi__data[k-1];);

  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Median along `dimension`, implemented as a kthvalue query.  For slices
 * of even length this selects the lower of the two middle elements. */
void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  int64_t t_size_dim, k;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");

  t_size_dim = THTensor_(size)(t, dimension);
  k = (t_size_dim-1) >> 1; /* take middle or one-before-middle element */

  THTensor_(kthvalue)(values_, indices_, t, k+1, dimension, keepdim);
}
/* Writes the k extreme elements of each slice of t along `dim` into rt_
 * (values) and ri_ (original indices).  dir != 0 selects the k largest
 * (returned in descending order when `sorted`), dir == 0 the k smallest
 * (ascending when `sorted`).  Each slice is staged through scratch
 * buffers and partially ordered with quickselect. */
void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int64_t k, int dim, int dir, int sorted)
{
  int numDims = THTensor_(nDimension)(t);
  THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range");

  int64_t sliceSize = THTensor_(size)(t, dim);
  THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension");

  /* scratch buffers reused for every slice */
  THTensor *tmpResults = THTensor_(new)();
  THTensor_(resize1d)(tmpResults, sliceSize);
  real *tmp__data = THTensor_(data)(tmpResults);

  THLongTensor *tmpIndices = THLongTensor_new();
  THLongTensor_resize1d(tmpIndices, sliceSize);
  int64_t *tmpi__data = THLongTensor_data(tmpIndices);

  THLongStorage *topKSize = THTensor_(newSizeOf)(t);
  THLongStorage_set(topKSize, dim, k);
  THTensor_(resize)(rt_, topKSize, NULL);
  THLongTensor_resize(ri_, topKSize, NULL);
  THLongStorage_free(topKSize);

  if (dir) {
    /* k largest elements, descending order (optional: see sorted) */
    /* quickselect on rank K-1 leaves the k largest in slots K..end */
    int64_t K = sliceSize - k;
    TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim,
                         TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                         int64_t i;
                         for(i = 0; i < sliceSize; i++)
                         {
                           tmp__data[i] = t_data[i*t_stride];
                           tmpi__data[i] = i;
                         }
                         if (K > 0)
                           THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1);
                         if (sorted)
                           THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1);
                         for(i = 0; i < k; i++)
                         {
                           rt__data[i*rt__stride] = tmp__data[i + K];
                           ri__data[i*ri__stride] = tmpi__data[i + K];
                         })
  }
  else {
    /* k smallest elements, ascending order (optional: see sorted) */
    TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim,
                         TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                         int64_t i;
                         for(i = 0; i < sliceSize; i++)
                         {
                           tmp__data[i] = t_data[i*t_stride];
                           tmpi__data[i] = i;
                         }
                         THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1);
                         /* sorting the first k-1 suffices: quickselect already
                          * placed the k-th smallest at slot k-1 */
                         if (sorted)
                           THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1);
                         for(i = 0; i < k; i++)
                         {
                           rt__data[i*rt__stride] = tmp__data[i];
                           ri__data[i*ri__stride] = tmpi__data[i];
                         })
  }

  THTensor_(free)(tmpResults);
  THLongTensor_free(tmpIndices);
}
/* r_ = lower-triangular part of matrix t: elements on or below the k-th
 * diagonal are copied, elements above it are zeroed. */
void THTensor_(tril)(THTensor *r_, THTensor *t, int64_t k)
{
  int64_t t_size_0, t_size_1;
  int64_t t_stride_0, t_stride_1;
  int64_t r__stride_0, r__stride_1;
  real *t_data, *r__data;
  int64_t r, c;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  THTensor_(resizeAs)(r_, t);

  t_size_0 = THTensor_(size)(t, 0);
  t_size_1 = THTensor_(size)(t, 1);
  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  r__stride_0 = THTensor_(stride)(r_, 0);
  r__stride_1 = THTensor_(stride)(r_, 1);
  r__data = THTensor_(data)(r_);
  t_data = THTensor_(data)(t);

  for(r = 0; r < t_size_0; r++)
  {
    /* columns [0, sz) are kept; columns [r+k+1, end) are zeroed */
    int64_t sz = THMin(r+k+1, t_size_1);
    for(c = THMax(0, r+k+1); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
  }
}
/* r_ = upper-triangular part of matrix t: elements on or above the k-th
 * diagonal are copied, elements below it are zeroed. */
void THTensor_(triu)(THTensor *r_, THTensor *t, int64_t k)
{
  int64_t t_size_0, t_size_1;
  int64_t t_stride_0, t_stride_1;
  int64_t r__stride_0, r__stride_1;
  real *t_data, *r__data;
  int64_t r, c;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  THTensor_(resizeAs)(r_, t);

  t_size_0 = THTensor_(size)(t, 0);
  t_size_1 = THTensor_(size)(t, 1);
  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  r__stride_0 = THTensor_(stride)(r_, 0);
  r__stride_1 = THTensor_(stride)(r_, 1);
  r__data = THTensor_(data)(r_);
  t_data = THTensor_(data)(t);

  for(r = 0; r < t_size_0; r++)
  {
    /* columns [r+k, end) are copied; columns [0, sz) are zeroed */
    int64_t sz = THMin(r+k, t_size_1);
    for(c = THMax(0, r+k); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
  }
}
/* Two-tensor concatenation: convenience wrapper over catArray. */
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
  THTensor* inputs[2];
  inputs[0] = ta;
  inputs[1] = tb;
  THTensor_(catArray)(r_, inputs, 2, dimension);
}
/* Raises (via THArgCheck) unless `first` and `second` have the same number
 * of dimensions and equal sizes in every dimension except `dimension`.
 * The preceding declaration gives the inline definition external linkage. */
void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension);
inline void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension)
{
  int first_dims = first->nDimension;
  int second_dims = second->nDimension;
  THArgCheck(first_dims == second_dims, 0,
      "Tensors must have same number of dimensions: got %d and %d",
      first_dims, second_dims);
  for (int dim = 0; dim < first_dims; dim++) {
    if (dim == dimension) {
      continue;
    }
    int64_t first_dim_size = first->size[dim];
    int64_t second_dim_size = second->size[dim];
    THArgCheck(first_dim_size == second_dim_size, 0,
        "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
        dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
  }
}
/* Concatenates `numInputs` tensors along `dimension` into `result`.
 * Empty (0-dimensional) inputs are skipped; if all inputs are empty the
 * function returns without touching `result`.  All non-empty inputs must
 * agree in every dimension except the cat dimension.  A fast memcpy path
 * is used when concatenating along dim 0 with everything contiguous;
 * otherwise each input is copied into a narrowed view of the result. */
void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension)
{
  // Find a non-empty tensor to record nDims
  int allEmpty = 1;
  int nDims = 0;
  THTensor *notEmptyTensor;  /* set whenever allEmpty becomes 0 */
  for (int i = 0; i < numInputs; i++) {
    int input_dims = inputs[i]->nDimension;
    if (input_dims == 0) {
      continue;
    }
    // We've found a non-empty tensor
    allEmpty = 0;
    notEmptyTensor = inputs[i];
    nDims = input_dims;
    break;
  }
  if (allEmpty) {
    return;
  }

  // Compute cat_dimension based on the non-empty tensor
  THArgCheck(dimension >= -1 && dimension < nDims, 4, "invalid dimension %d", dimension);

  // When the user input dimension is -1 (i.e. -2 in C)
  // Then we pick the last dimension across non-empty tensors.
  int cat_dimension = dimension;
  if (dimension + TH_INDEX_BASE == -1) {
    cat_dimension = nDims ? nDims - 1 : 0;
  }

  THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);

  // Compute size of the result in the cat dimension
  int64_t cat_dim_size = 0;
  for (int i = 0; i < numInputs; i++) {
    THTensor *tensor = inputs[i];
    if (tensor->nDimension == 0) {
      continue;
    }
    /* every non-empty input must match notEmptyTensor off the cat dim */
    THTensor_(check_shape_except_dim)(notEmptyTensor, tensor, cat_dimension);
    cat_dim_size += tensor->size[cat_dimension];
  }

  // Compute the size of the result
  THLongStorage *size = THLongStorage_newWithSize(nDims);
  for (int dim = 0; dim < nDims; dim++) {
    int64_t result_dim_size = notEmptyTensor->size[dim];
    if (dim == cat_dimension) {
      result_dim_size = cat_dim_size;
    }
    size->data[dim] = result_dim_size;
  }
  THTensor_(resize)(result, size, NULL);

  // Check contiguity of all inputs and result
  int allContiguous = 1;
  for (int i = 0; i < numInputs; i++) {
    if(inputs[i]->nDimension) {
      allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]);
    }
  }
  allContiguous = allContiguous && THTensor_(isContiguous)(result);

  // First path is for contiguous inputs along dim 0
  // Second path for non-contiguous
  int64_t offset;
  if (cat_dimension == 0 && allContiguous) {
    /* flat memcpy of each input's elements, back to back */
    real* result_data = result->storage->data + result->storageOffset;
    offset = 0;
    for (int j = 0; j < numInputs; j++) {
      if (inputs[j]->nDimension) {
        THTensor* input0 = inputs[j];
        real* input0_data = input0->storage->data + input0->storageOffset;
        int64_t input0_size = THTensor_(nElement)(input0);
        memcpy(result_data + offset, input0_data, input0_size*sizeof(real));
        offset += input0_size;
      }
    }
  } else {
    /* copy each input into a narrowed window of the result */
    offset = 0;
    for (int j = 0; j < numInputs; j++) {
      if (inputs[j]->nDimension) {
        int64_t dimSize = cat_dimension < inputs[j]->nDimension ? inputs[j]->size[cat_dimension] : 1;
        THTensor *nt = THTensor_(newWithTensor)(result);
        THTensor_(narrow)(nt, NULL, cat_dimension, offset, dimSize);
        THTensor_(copy)(nt, inputs[j]);
        THTensor_(free)(nt);
        offset += dimSize;
      }
    }
  }
  THLongStorage_free(size);
}
/* Returns 1 iff ta and tb have identical sizes and all elements compare
 * equal; 0 otherwise.  Contiguous pair: plain linear scan with early
 * return.  Otherwise: strided apply that sets the macro's internal
 * hasFinished flag to abort iteration on the first mismatch. */
int THTensor_(equal)(THTensor *ta, THTensor* tb)
{
  int equal = 1;
  if(!THTensor_(isSameSizeAs)(ta, tb))
    return 0;

  if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) {
    real *tap = THTensor_(data)(ta);
    real *tbp = THTensor_(data)(tb);
    ptrdiff_t sz = THTensor_(nElement)(ta);
    ptrdiff_t i;
    for (i=0; i<sz; ++i){
      if(tap[i] != tbp[i]) return 0;
    }
  } else {
    // Short-circuit the apply function on inequality
    TH_TENSOR_APPLY2(real, ta, real, tb,
                     if (equal && *ta_data != *tb_data) {
                       equal = 0;
                       TH_TENSOR_APPLY_hasFinished = 1; break;
                     })
  }
  return equal;
}
/* Generates the four comparison entry points for an operator OP:
 *   NAME##Value   — tensor OP scalar, byte-tensor result (0/1)
 *   NAME##ValueT  — tensor OP scalar, same-type result (0/1)
 *   NAME##Tensor  — tensor OP tensor, byte-tensor result (0/1)
 *   NAME##TensorT — tensor OP tensor, same-type result (0/1)
 * Fix: removed the dangling line-continuation backslash after the final
 * closing brace, which spliced the line following the macro definition
 * into the macro body. */
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
  void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \
  { \
    THByteTensor_resizeNd(r_, t->nDimension, t->size, NULL); \
    TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
                     *r__data = (*t_data OP value) ? 1 : 0;); \
  } \
  void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \
  { \
    THTensor_(resizeNd)(r_, t->nDimension, t->size, NULL); \
    TH_TENSOR_APPLY2(real, r_, real, t, \
                     *r__data = (*t_data OP value) ? 1 : 0;); \
  } \
  void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \
  { \
    THByteTensor_resizeNd(r_, ta->nDimension, ta->size, NULL); \
    TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
                     *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
  } \
  void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \
  { \
    THTensor_(resizeNd)(r_, ta->nDimension, ta->size, NULL); \
    TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \
                     *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
  }
/* Instantiate the six standard comparison operators. */
TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)
#ifdef _OPENMP
/* OpenMP build: elementwise unary op r_[i] = CFUNC(t[i]).  The OMP apply
 * is used only for large tensors and when not already inside a parallel
 * region (avoids nested parallelism). */
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
  void THTensor_(NAME)(THTensor *r_, THTensor *t) \
  { \
    THTensor_(resizeAs)(r_, t); \
    ptrdiff_t r_Size = THTensor_(nElement)(r_); \
    int r_Contig = THTensor_(isContiguous)(r_); \
    int tContig = THTensor_(isContiguous)(t); \
    int inOMP = omp_in_parallel(); \
    if( (r_Size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOMP) ){ \
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = CFUNC(*t_data);); \
    } \
    else { \
      TH_TENSOR_APPLY2(real, r_, real, t, *r__data = CFUNC(*t_data);); \
    } \
  }

/* OpenMP build: prefers the THVector SIMD kernel when both tensors are
 * contiguous, else falls back to the (possibly OMP) scalar apply. */
#define LAB_IMPLEMENT_VECTORIZED_FUNCTION(NAME, CFUNC) \
  void THTensor_(NAME)(THTensor *r_, THTensor *t) \
  { \
    THTensor_(resizeAs)(r_, t); \
    ptrdiff_t r_Size = THTensor_(nElement)(r_); \
    int r_Contig = THTensor_(isContiguous)(r_); \
    int tContig = THTensor_(isContiguous)(t); \
    if (r_Contig && tContig) { \
      TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(NAME)(r__data, t_data, r__len);); \
    } else { \
      int inOMP = omp_in_parallel(); \
      if( (r_Size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOMP) ){ \
        TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = CFUNC(*t_data);); \
      } \
      else { \
        TH_TENSOR_APPLY2(real, r_, real, t, *r__data = CFUNC(*t_data);); \
      } \
    } \
  }
#else
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
} \
#define LAB_IMPLEMENT_VECTORIZED_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
int r_Contig = THTensor_(isContiguous)(r_); \
int tContig = THTensor_(isContiguous)(t); \
if (r_Contig && tContig) { \
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(NAME)(r__data, t_data, r__len);); \
} else { \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
} \
} \
#endif
/* Unary negation for every real type. */
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
/* Integer absolute value: labs for 64-bit, abs for short/int. */
#if defined(TH_REAL_IS_LONG)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs)
#endif /* int64_t only part */
#if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs)
#endif /* int only part */
#if defined(TH_REAL_IS_BYTE)
/* Byte tensors only: fold all elements with a logical operator.
   Note: no early exit — the whole tensor is always scanned, even once the
   result can no longer change. */
#define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \
int THTensor_(NAME)(THTensor *tensor) \
{ \
int sum = INIT_VALUE; \
TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \
return sum; \
}
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1)
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0)
#endif /* Byte only part */
/* floating point only now */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
/* TH_MATH_NAME(fn) selects the float (fnf) or double (fn) libm entry point
   matching the current real type. */
#if defined (TH_REAL_IS_FLOAT)
#define TH_MATH_NAME(fn) fn##f
#else
#define TH_MATH_NAME(fn) fn
#endif
/* Elementwise transcendental / rounding functions, one THTensor_(NAME)
   definition per line via LAB_IMPLEMENT_BASIC_FUNCTION. */
LAB_IMPLEMENT_BASIC_FUNCTION(log,TH_MATH_NAME(log))
LAB_IMPLEMENT_BASIC_FUNCTION(lgamma,TH_MATH_NAME(lgamma))
LAB_IMPLEMENT_BASIC_FUNCTION(digamma,TH_MATH_NAME(TH_digamma))
LAB_IMPLEMENT_BASIC_FUNCTION(trigamma,TH_MATH_NAME(TH_trigamma))
LAB_IMPLEMENT_BASIC_FUNCTION(log10,TH_MATH_NAME(log10))
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,TH_MATH_NAME(log1p))
LAB_IMPLEMENT_BASIC_FUNCTION(log2,TH_MATH_NAME(log2))
LAB_IMPLEMENT_BASIC_FUNCTION(exp,TH_MATH_NAME(exp))
LAB_IMPLEMENT_BASIC_FUNCTION(expm1,TH_MATH_NAME(expm1))
LAB_IMPLEMENT_BASIC_FUNCTION(cos,TH_MATH_NAME(cos))
LAB_IMPLEMENT_BASIC_FUNCTION(acos,TH_MATH_NAME(acos))
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,TH_MATH_NAME(cosh))
LAB_IMPLEMENT_BASIC_FUNCTION(sin,TH_MATH_NAME(sin))
LAB_IMPLEMENT_BASIC_FUNCTION(asin,TH_MATH_NAME(asin))
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,TH_MATH_NAME(sinh))
LAB_IMPLEMENT_BASIC_FUNCTION(tan,TH_MATH_NAME(tan))
LAB_IMPLEMENT_BASIC_FUNCTION(atan,TH_MATH_NAME(atan))
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,TH_MATH_NAME(tanh))
LAB_IMPLEMENT_BASIC_FUNCTION(erf,TH_MATH_NAME(erf))
LAB_IMPLEMENT_BASIC_FUNCTION(erfinv,TH_erfinv)
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,TH_MATH_NAME(sqrt))
LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_MATH_NAME(TH_rsqrt))
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,TH_MATH_NAME(ceil))
LAB_IMPLEMENT_BASIC_FUNCTION(floor,TH_MATH_NAME(floor))
LAB_IMPLEMENT_BASIC_FUNCTION(round,TH_MATH_NAME(round))
LAB_IMPLEMENT_BASIC_FUNCTION(abs,TH_MATH_NAME(fabs))
LAB_IMPLEMENT_BASIC_FUNCTION(trunc,TH_MATH_NAME(trunc))
LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_MATH_NAME(TH_frac))
/* cinv: CFUNC is the partial expression "1.0 /", so the expansion becomes
   *r__data = 1.0 / (*t_data) — elementwise reciprocal. */
LAB_IMPLEMENT_BASIC_FUNCTION(cinv, TH_MATH_NAME(1.0) / )
LAB_IMPLEMENT_VECTORIZED_FUNCTION(sigmoid,TH_MATH_NAME(TH_sigmoid))
/* Elementwise atan2: r_[i] = atan2(tx[i], ty[i]).  r_ is resized to tx.
   NOTE(review): unlike lerp below there is no explicit size check between
   tx and ty here — presumably TH_TENSOR_APPLY3 enforces it; confirm. */
void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
THTensor_(resizeAs)(r_, tx);
TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = TH_MATH_NAME(atan2)(*tx_data,*ty_data););
}
/* Polygamma function of order n, elementwise over t into r_.
   Only n == 0 (digamma) and n == 1 (trigamma) are implemented; any other
   order raises a THError. */
void THTensor_(polygamma)(THTensor *r_, int64_t n, THTensor *t) {
if (n == 0) {
THTensor_(digamma)(r_, t);
} else if (n == 1) {
THTensor_(trigamma)(r_, t);
} else {
THError("polygamma(n,x) is not implemented for n>=2");
}
}
/* Linear interpolation: r_[i] = a[i] + weight * (b[i] - a[i]).
   a and b must hold the same number of elements; r_ is resized to a. */
void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight)
{
THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match");
THTensor_(resizeAs)(r_, a);
TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_MATH_NAME(TH_lerp)(*a_data, *b_data, weight););
}
/* Mean along 'dimension': sum along the dimension, then divide in place by
   the length of that dimension.  keepdim controls whether the reduced
   dimension is retained with size 1. */
void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THTensor_(sum)(r_, t, dimension, keepdim);
THTensor_(div)(r_, r_, t->size[dimension]);
}
/* Standard deviation along 'dimension' using Welford's single-pass update
   (numerically stable: avoids catastrophic cancellation of sum-of-squares).
   biased divides M2 by N, unbiased by N-1; a single-element slice yields 0
   (biased) or NaN (unbiased). */
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
// Shape the output: same as t but with the reduced dimension set to 1.
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
// Uses Welford's algorithm for numeric stability
accreal mean = 0;
accreal M2 = 0;
int64_t i;
for (i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
real delta = z - mean;
mean += delta / (i + 1);
real delta2 = z - mean;
M2 += delta * delta2;
}
if (biased && t_size >= 2)
{
*r__data = TH_MATH_NAME(sqrt)(M2 / t_size);
} else if (!biased && t_size >= 2) {
*r__data = TH_MATH_NAME(sqrt)(M2 / (t_size - 1));
} else if (biased && t_size == 1) {
*r__data = 0;
} else {
*r__data = NAN;
});
// Drop the size-1 reduced dimension unless the caller asked to keep it.
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
}
/* Variance along 'dimension'; identical structure to THTensor_(std) above
   but without the final sqrt.  Welford single-pass update; biased divides
   M2 by N, unbiased by N-1; single-element slices give 0 (biased) or NaN. */
void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
// Output shape: like t, with the reduced dimension set to 1.
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
// Uses Welford's algorithm for numeric stability
accreal mean = 0;
accreal M2 = 0;
int64_t i;
for (i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
real delta = z - mean;
mean += delta / (i + 1);
real delta2 = z - mean;
M2 += delta * delta2;
}
if (biased && t_size >= 2)
{
*r__data = M2 / t_size;
} else if (!biased && t_size >= 2) {
*r__data = M2 / (t_size - 1);
} else if (biased && t_size == 1) {
*r__data = 0;
} else {
*r__data = NAN;
});
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
}
/* p-norm along 'dimension'.  Specialized accumulators for p = 0 (count of
   nonzeros), 1, 2 and 3; generic pow-based path otherwise.  DIM_REDUCE is a
   local helper macro: 'reduce' folds each slice element into 'sum',
   'transform' writes the final value to *r__data. */
void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
#define DIM_REDUCE(reduce, transform) \
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, \
accreal sum = 0; \
int64_t i; \
for(i = 0; i < t_size; i++) { \
(reduce); \
} \
(transform);) \
if(value == 0) {
DIM_REDUCE(sum += t_data[i*t_stride] != 0.0,
*r__data = sum);
} else if (value == 1) {
DIM_REDUCE(sum += TH_MATH_NAME(fabs)(t_data[i*t_stride]),
*r__data = sum);
} else if (value == 2) {
DIM_REDUCE(sum += t_data[i*t_stride] * t_data[i*t_stride],
*r__data = TH_MATH_NAME(sqrt)(sum));
} else if (value == 3) {
DIM_REDUCE(sum += TH_MATH_NAME(fabs)(t_data[i*t_stride] * t_data[i*t_stride] * t_data[i*t_stride]),
*r__data = TH_MATH_NAME(pow)(sum, 1.0/3));
} else {
DIM_REDUCE(sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(t_data[i*t_stride]), value),
*r__data = TH_MATH_NAME(pow)(sum, 1.0/value));
}
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
#undef DIM_REDUCE
}
/* p-norm over every element of the tensor.
   p == 0 returns the count of nonzero elements, p == 1 the sum of absolute
   values, p == 2 the Euclidean norm, p == 3 a cubed-magnitude norm; any other
   p uses the generic pow(|x|, p) accumulation.  Accumulation is in accreal. */
accreal THTensor_(normall)(THTensor *tensor, real value)
{
accreal sum = 0;
if(value == 0) {
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
return sum;
} else if(value == 1) {
TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(fabs)(*tensor_data););
return sum;
} else if(value == 2) {
TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;);
return sqrt(sum);
} else if(value == 3) {
/* BUGFIX: this branch previously called std::abs, a C++ name that does
   not compile in C.  Use the libm fabs selected by TH_MATH_NAME, matching
   the p == 1 branch and the dim-wise THTensor_(norm) above. */
TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += TH_MATH_NAME(fabs)(z*z*z););
return TH_MATH_NAME(pow)(sum, 1.0/3);
} else {
TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*tensor_data), value););
return TH_MATH_NAME(pow)(sum, 1.0/value);
}
}
/* Renormalize each slice of src along 'dimension' so that its p-norm does
   not exceed maxnorm; slices already within the bound are copied verbatim.
   NOTE(review): this function uses plain fabs/pow (double) rather than
   TH_MATH_NAME variants in places — harmless for correctness but slightly
   inconsistent with the rest of the file for float builds. */
void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm)
{
int i;
THTensor *rowR, *rowS;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THArgCheck(value > 0, 2, "non-positive-norm not supported");
THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions",
THTensor_(nDimension)(src));
rowR = THTensor_(new)();
rowS = THTensor_(new)();
THTensor_(resizeAs)(res, src);
for (i=0; i<src->size[dimension]; i++)
{
real norm = 0;
real new_norm;
/* Views (no copies) of the i-th slice of src and res. */
THTensor_(select)(rowS, src, dimension, i);
THTensor_(select)(rowR, res, dimension, i);
if (value == 1) {
TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data););
} else if (value == 2) {
TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;);
} else {
TH_TENSOR_APPLY(real, rowS, norm += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*rowS_data), value););
}
norm = pow(norm, 1/value);
if (norm > maxnorm)
{
/* The 1e-7 guards against division by a vanishing norm. */
new_norm = maxnorm / (norm + 1e-7);
TH_TENSOR_APPLY2(
real, rowR, real, rowS,
*rowR_data = (*rowS_data) * new_norm;
)
}
else
THTensor_(copy)(rowR, rowS);
}
THTensor_(free)(rowR);
THTensor_(free)(rowS);
}
/* p-distance between two tensors: (sum_i |tensor_i - src_i|^p)^(1/p).
   BUGFIX: the accumulator was declared 'real' although the function returns
   'accreal'; for float tensors this silently accumulated in single precision.
   Accumulate in accreal to match the return type and the rest of the file. */
accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
accreal sum = 0;
TH_TENSOR_APPLY2(real, tensor, real, src,
sum += TH_MATH_NAME(pow)(
TH_MATH_NAME(fabs)(*tensor_data - *src_data), value););
return TH_MATH_NAME(pow)(sum, 1.0/value);
}
/* Arithmetic mean of every element; rejects empty tensors. */
accreal THTensor_(meanall)(THTensor *tensor)
{
THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
const accreal total = THTensor_(sumall)(tensor);
return total/THTensor_(nElement)(tensor);
}
/* Variance over all elements (two-pass: mean first, then squared
   deviations).  'biased' divides by N, otherwise by N - 1. */
accreal THTensor_(varall)(THTensor *tensor, int biased)
{
const accreal mean = THTensor_(meanall)(tensor);
accreal sq_dev = 0;
TH_TENSOR_APPLY(real, tensor, sq_dev += (*tensor_data - mean)*(*tensor_data - mean););
const ptrdiff_t denom = THTensor_(nElement)(tensor) - (biased ? 0 : 1);
return sq_dev / denom;
}
/* Standard deviation over all elements: the square root of varall. */
accreal THTensor_(stdall)(THTensor *tensor, int biased)
{
const accreal variance = THTensor_(varall)(tensor, biased);
return sqrt(variance);
}
/* Fill r_ with n evenly spaced values from a to b inclusive.
   The counter 'i' is a 'real' incremented inside the apply body, so this
   relies on TH_TENSOR_APPLY visiting elements in order; for very large n in
   float builds the counter itself loses integer precision. */
void THTensor_(linspace)(THTensor *r_, real a, real b, int64_t n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THTensor_(nElement)(r_) != n) {
THTensor_(resize1d)(r_, n);
}
if(n == 1) {
THTensor_(set1d)(r_, 0, a);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = a + (b-a)/((real)(n-1))*i;
i++;
);
}
}
/* Fill r_ with n logarithmically spaced values 10^a .. 10^b inclusive.
   Same stateful-counter pattern (and caveats) as linspace above. */
void THTensor_(logspace)(THTensor *r_, real a, real b, int64_t n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THTensor_(nElement)(r_) != n) {
THTensor_(resize1d)(r_, n);
}
if(n == 1) {
THTensor_(set1d)(r_, 0, TH_MATH_NAME(pow)(10.0, a));
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((real)(n-1)));
i++;
);
}
}
/* Resize r_ to 'size' and fill with uniform random values in [0, 1). */
void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(uniform)(r_, _generator, 0, 1);
}
/* Resize r_ to 'size' and fill with standard normal (mean 0, stddev 1)
   random values. */
void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(normal)(r_, _generator, 0, 1);
}
/* Histogram of all elements into nbins equal-width bins over
   [minvalue, maxvalue].  If minvalue == maxvalue the range is taken from the
   tensor's actual min/max, and widened by ±1 if still degenerate.  Values
   equal to maxval are clamped into the last bin via THMin. */
void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue)
{
real minval;
real maxval;
real *h_data;
THTensor_(resize1d)(hist, nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
if (minval == maxval)
{
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
h_data = THTensor_(data)(hist);
TH_TENSOR_APPLY(real, tensor,
if (*tensor_data >= minval && *tensor_data <= maxval) {
const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins);
h_data[THMin(bin, nbins-1)] += 1;
}
);
}
/* Batched histogram: histograms each row of a 2-D tensor independently into
   hist (size0 x nbins).  The shared min/max range is computed over the WHOLE
   tensor (not per row) when minvalue == maxvalue.
   NOTE(review): the dimension check accepts nDimension < 3, so a 1-D tensor
   passes despite the "must be a 2d tensor" message — confirm intent. */
void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue)
{
THArgCheck(THTensor_(nDimension)(tensor) < 3, 2, "invalid dimension %d, the input must be a 2d tensor", THTensor_(nDimension)(tensor));
int dimension = 1;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(tensor), 2, "invalid dimension %d",
dimension + TH_INDEX_BASE);
real minval;
real maxval;
THTensor_(resize2d)(hist, tensor->size[0], nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
if (minval == maxval)
{
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
TH_TENSOR_DIM_APPLY2(real, tensor, real, hist, dimension, int64_t i;
for(i = 0; i < tensor_size; i++)
{
if(tensor_data[i*tensor_stride] >= minval && tensor_data[i*tensor_stride] <= maxval) {
const int bin = (int)((tensor_data[i*tensor_stride]-minval) / (maxval-minval) * nbins);
hist_data[THMin(bin, nbins-1)] += 1;
}
}
);
}
// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
// Assumes x is close to zero and uses a Taylor expansion.
// The series is truncated at 10 terms; NaN results (overflow/degenerate
// inputs) are mapped to 0.
static inline real THTensor_(beta_grad_alpha_small)(real x, real alpha, real beta) {
const real factor = TH_MATH_NAME(TH_digamma)(alpha) - TH_MATH_NAME(TH_digamma)(alpha + beta) - TH_MATH_NAME(log)(x);
real numer = 1;
real series = numer / alpha * (factor + 1 / alpha);
for (int i = 1; i <= 10; ++i) {
// Running numerator of the i-th Taylor term: prod_k (k - beta) x / k.
numer *= (i - beta) * x / i;
const real denom = alpha + i;
series += numer / denom * (factor + 1 / denom);
}
const real result = x * TH_MATH_NAME(pow)(1 - x, -beta) * series;
return th_isnan(result) ? 0.0 : result;
}
// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt beta.
// Assumes x is close to zero and uses a Taylor expansion.
// 'betas' tracks the falling product prod_k (beta - k) and 'dbetas' its
// derivative wrt beta, updated together via the product rule.  The series is
// truncated at 8 terms; NaN results are mapped to 0.
static inline real THTensor_(beta_grad_beta_small)(real x, real alpha, real beta) {
const real factor = TH_MATH_NAME(TH_digamma)(alpha+beta) - TH_MATH_NAME(TH_digamma)(beta);
real numer = 1;
real betas = 1;
real dbetas = 0;
real series = factor / alpha;
for (int i = 1; i <= 8; ++i) {
numer *= -x / i;
dbetas = dbetas * (beta - i) + betas;
betas = betas * (beta - i);
series += numer / (alpha + i) * (dbetas + factor * betas);
}
const real result = -TH_MATH_NAME(pow)(1 - x, 1 - beta) * series;
return th_isnan(result) ? 0.0 : result;
}
// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
// Assumes alpha and beta are both large and uses a Rice saddle point expansion.
// To ensure numerical stability, this computation is performed at higher precision
// (all intermediates are double regardless of 'real').
static inline real THTensor_(beta_grad_alpha_mid)(double x, double alpha, double beta) {
const double total = alpha + beta;
const double mean = alpha / total;
const double std = sqrt(alpha * beta / (total + 1)) / total;
if (mean - 0.1 * std <= x && x <= mean + 0.1 * std) {
// Avoid the singularity at x = mean.
// Polynomial branch: series expansion of the gradient around x = mean.
const double poly = 47 * x * (beta*beta)*(beta*beta) + alpha * (
(43 + 20 * (16 + 27 * beta) * x) * (beta*beta)*beta + alpha * (
3 * (59 + 180 * beta - 90 * x) * (beta*beta) + alpha * (
(453 + 1620 * beta * (1 - x) - 455 * x) * beta + alpha * (
8 * (1 - x) * (135 * beta - 11)))));
const double prefactor_num = (1 + 12 * alpha) * (1 + 12 * beta) / (total * total);
const double prefactor_den = 12960 * alpha * alpha * alpha * beta * beta * (1 + 12 * total);
return prefactor_num / (1 - x) * poly / prefactor_den;
}
// General saddle-point branch: Stirling-corrected prefactor times four
// correction terms.
const double prefactor = -x / sqrt(2 * alpha * beta / total);
const double stirling = (1 + 1 / (12 * alpha) + 1 / (288 * alpha*alpha))
* (1 + 1 / (12 * beta) + 1 / (288 * beta*beta))
/ (1 + 1 / (12 * total) + 1 / (288 * total*total));
const double term1_num = 2 * (alpha*alpha) * (x - 1) + alpha * beta * (x - 1) - x * (beta*beta);
const double axbx = alpha * (x-1) + beta * x;
const double term1_den = sqrt(2 * alpha / beta) * pow(total, 1.5f) * axbx*axbx;
const double term1 = term1_num / term1_den;
const double term2 = 0.5f * log(alpha / (total * x));
const double term3_num = sqrt(8 * alpha * beta / total);
const double term3_den = beta * x + alpha * (x - 1);
const double term3 = term3_num / term3_den;
const double term4_base = beta * log(beta / (total * (1 - x))) +
alpha * log(alpha / (total * x));
const double term4 = pow(term4_base, -1.5f);
// The sign of term4 flips across the mean.
const double term1234 = term1 + term2 * (term3 + (x < mean ? term4 : -term4));
return stirling * prefactor * term1234;
}
// Computes a scaled reparameterized gradient
// -(d/dalpha cdf(x;alpha,beta)) / pdf(x;alpha,beta) / (1-x)
// for random number x drawn from a Beta distribution Beta(alpha,beta).
// This function inputs total=alpha+beta to make it easy to implement
// Dirichlet reparameterized gradients in terms of Betas.
// Dispatches to one of three asymptotic approximations near the boundaries
// / for large parameters, and otherwise uses a fitted rational correction
// p/q (coefficient table 'c') applied to an analytic approximation.
static inline real THTensor_(dirichlet_grad_one)(real x, real alpha, real total) {
const real beta = total - alpha;
const real boundary = total * x * (1 - x);
// Use an asymptotic approximation for x close to 0.
if (x <= 0.5f && boundary < 2.5f) {
return THTensor_(beta_grad_alpha_small)(x, alpha, beta);
}
// Use an asymptotic approximation for x close to 1.
if (x >= 0.5f && boundary < 0.75f) {
return -THTensor_(beta_grad_beta_small)(1 - x, beta, alpha);
}
// Use an asymptotic approximation when alpha and (total - alpha) are both large.
if (alpha > 6 && beta > 6) {
return THTensor_(beta_grad_alpha_mid)(x, alpha, beta);
}
// Use a rational correction to an analytic approximation.
// c[0] holds numerator coefficients, c[1] denominator coefficients, indexed
// by powers of u = log x, a = log(alpha) - u, and b = log(total) - a.
static const real c[2][3][3][4] = {
{{{1.003668233, -0.01061107488, -0.0657888334, 0.01201642863},
{0.6336835991, -0.3557432599, 0.05486251648, -0.001465281033},
{-0.03276231906, 0.004474107445, 0.002429354597, -0.0001557569013}},
{{0.221950385, -0.3187676331, 0.01799915743, 0.01074823814},
{-0.2951249643, 0.06219954479, 0.01535556598, 0.001550077057},
{0.02155310298, 0.004170831599, 0.001292462449, 6.976601077e-05}},
{{-0.05980841433, 0.008441916499, 0.01085618172, 0.002319392565},
{0.02911413504, 0.01400243777, -0.002721828457, 0.000751041181},
{0.005900514878, -0.001936558688, -9.495446725e-06, 5.385558597e-05}}},
{{{1, -0.02924021934, -0.04438342661, 0.007285809825},
{0.6357567472, -0.3473456711, 0.05454656494, -0.002407477521},
{-0.03301322327, 0.004845219414, 0.00231480583, -0.0002307248149}},
{{0.5925320577, -0.1757678135, 0.01505928619, 0.000564515273},
{0.1014815858, -0.06589186703, 0.01272886114, -0.0007316646956},
{-0.007258481865, 0.001096195486, 0.0003934994223, -4.12701925e-05}},
{{0.06469649321, -0.0236701437, 0.002902096474, -5.896963079e-05},
{0.001925008108, -0.002869809258, 0.0008000589141, -6.063713228e-05},
{-0.0003477407336, 6.959756487e-05, 1.097287507e-05, -1.650964693e-06}}},
};
const real u = TH_MATH_NAME(log)(x);
const real a = TH_MATH_NAME(log)(alpha) - u;
const real b = TH_MATH_NAME(log)(total) - a;
const real pow_u[3] = {1, u, u * u};
const real pow_a[3] = {1, a, a * a};
real p = 0.0;
real q = 0.0;
// Evaluate p and q as cubics in b for each (u^i, a^j) basis term.
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
const real ua = pow_u[i] * pow_a[j];
p += ua * (c[0][i][j][0] + b * (c[0][i][j][1] + b * (c[0][i][j][2] + b * c[0][i][j][3])));
q += ua * (c[1][i][j][0] + b * (c[1][i][j][1] + b * (c[1][i][j][2] + b * c[1][i][j][3])));
}
}
const real approx = x * (TH_MATH_NAME(TH_digamma)(total) - TH_MATH_NAME(TH_digamma)(alpha)) / beta;
return p / q * approx;
}
/* Elementwise Dirichlet reparameterized gradient: for each i,
   self[i] = dirichlet_grad_one(x[i], alpha[i], total[i]).
   Inputs are made contiguous so the flat parallel loop is valid; alpha and
   total must match x's size.  Parallelized with OpenMP for large tensors. */
void THTensor_(dirichlet_grad)(THTensor *self, THTensor *x, THTensor *alpha, THTensor *total)
{
x = THTensor_(newContiguous)(x);
alpha = THTensor_(newContiguous)(alpha);
total = THTensor_(newContiguous)(total);
TH_CHECK_SAME_SIZE(alpha, x);
TH_CHECK_SAME_SIZE(total, x);
THTensor_(resizeAs)(self, x);
THTensor* grad = THTensor_(newContiguous)(self);
real*const grad_data = THTensor_(data)(grad);
real*const x_data = THTensor_(data)(x);
real*const alpha_data = THTensor_(data)(alpha);
real*const total_data = THTensor_(data)(total);
const int64_t numel = THTensor_(nElement)(x);
int64_t i;
#pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for(i = 0; i < numel; ++i) {
grad_data[i] = THTensor_(dirichlet_grad_one)(x_data[i], alpha_data[i], total_data[i]);
}
/* Copies grad into self and releases grad. */
THTensor_(freeCopyTo)(grad, self);
/* BUGFIX: release the references taken by newContiguous above; previously
   x, alpha and total were leaked on every call. */
THTensor_(free)(x);
THTensor_(free)(alpha);
THTensor_(free)(total);
}
#undef TH_MATH_NAME
#endif /* floating point only part */
#undef IS_NONZERO
#endif
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/opencl.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% to try to set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image)
% MagickBooleanType AutoGammaImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set, all given channels are adjusted in the same way using the
% mean average of those channels.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  /*
    Auto-gamma the default channel set.
  */
  MagickBooleanType
    status;

  status=AutoGammaImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
const ChannelType channel)
{
double
gamma,
mean,
logmean,
sans;
MagickStatusType
status;
logmean=log(0.5);
if ((channel & SyncChannels) != 0)
{
/*
Apply gamma correction equally accross all given channels
*/
(void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
gamma=log(mean*QuantumScale)/logmean;
return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma));
}
/*
Auto-gamma each channel separateally
*/
status = MagickTrue;
if ((channel & RedChannel) != 0)
{
(void) GetImageChannelMean(image,RedChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange,
gamma);
}
if ((channel & GreenChannel) != 0)
{
(void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange,
gamma);
}
if ((channel & BlueChannel) != 0)
{
(void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange,
gamma);
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
(void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange,
gamma);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
(void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange,
gamma);
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image)
% MagickBooleanType AutoLevelImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set, the combined min/max of all given channels is used so that
% every given channel is stretched in the same way.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  /*
    Auto-level the default channel set.
  */
  MagickBooleanType
    status;

  status=AutoLevelImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  /*
    Auto-level is a min/max histogram stretch with zero black-point and
    white-point clipping.
  */
  MagickBooleanType
    status;

  status=MinMaxStretchImage(image,channel,0.0,0.0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast)
% MagickBooleanType BrightnessContrastImageChannel(Image *image,
% const ChannelType channel,const double brightness,
% const double contrast)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  /*
    Apply brightness/contrast to the default channel set.
  */
  return(BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast));
}
/* Convert brightness/contrast percents (-100..100) into a linear transfer
   function out = slope*in + intercept, applied via the polynomial pixel
   function. */
MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
const ChannelType channel,const double brightness,const double contrast)
{
/* NOTE(review): "BrightnessContast" is misspelled but is the historical
   progress-tag string; do not correct without auditing consumers. */
#define BrightnessContastImageTag "BrightnessContast/Image"
double
alpha,
intercept,
coefficients[2],
slope;
MagickBooleanType
status;
/*
Compute slope and intercept.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* contrast 0 -> slope 1; tan() maps the percent onto (0, inf), clamped at 0. */
alpha=contrast;
slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
if (slope < 0.0)
slope=0.0;
/* brightness shifts the intercept, scaled so the transfer stays anchored
   as slope varies. */
intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
coefficients[0]=slope;
coefficients[1]=intercept;
status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
&image->exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"

  /* One ASC CDL SOP triple: out = (in*slope + offset)^power. */
  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  /* Per-channel SOP corrections plus the SAT (saturation) value. */
  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MaxTextExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelPacket
    *cdl_map;

  register ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Allocate and initialize cdl maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  /* Parse the CCC XML; a malformed document is reported as MagickFalse. */
  ccc=NewXMLTree((const char *) color_correction_collection,&image->exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /*
    Identity SOP defaults (slope=1, offset=0, power=1).
    NOTE(review): saturation defaults to 0.0; with the blend below this
    fully desaturates the image when the CCC file omits a SATNode --
    confirm this default is intended (ASC CDL identity saturation is 1.0).
  */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      /* Each SOP element carries up to three values: red, green, blue. */
      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')  /* skip comma separators between values */
              GetNextToken(p,&p,MaxTextExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      /* SAT node holds a single saturation value. */
      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          GetNextToken(p,&p,MaxTextExtent,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.saturation: %g",color_correction.saturation);
    }
  /*
    Precompute the SOP transfer function for every quantum level:
    out = (in*slope + offset)^power, per channel.
  */
  cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power)))));
    cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power)))));
    cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power)))));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Apply transfer function to colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          luma;

        /* Luma weights (close to the Rec. 709 coefficients). */
        luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+
          0.072186*image->colormap[i].blue;
        /*
          NOTE(review): unlike the pixel loop below, the saturation term
          here is not parenthesized, so this computes luma+sat*map-luma
          (== sat*map) rather than luma+sat*(map-luma) -- confirm which
          form is intended.
        */
        image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation*
          cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma);
        image->colormap[i].green=ClampToQuantum(luma+
          color_correction.saturation*cdl_map[ScaleQuantumToMap(
          image->colormap[i].green)].green-luma);
        image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation*
          cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma);
      }
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* SAT blend: luma + saturation*(corrected - luma), per channel. */
      luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+
        0.072186*GetPixelBlue(q);
      SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma)));
      SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma)));
      SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across the parallel row loop. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will effect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image)
% MagickBooleanType ClutImageChannel(Image *image,
% const ChannelType channel,Image *clut_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: apply the color lookup table to the default
    channels of the image.
  */
  status=ClutImageChannel(image,DefaultChannels,clut_image);
  return(status);
}
MagickExport MagickBooleanType ClutImageChannel(Image *image,
  const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    *clut_map;

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* A color CLUT applied to a gray image requires an RGB colorspace. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*clut_map));
  if (clut_map == (MagickPixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /* adjust=1 keeps the last sample inside the CLUT for interpolating
     pixel methods; integer interpolation samples the full range. */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  exception=(&image->exception);
  clut_view=AcquireAuthenticCacheView(clut_image,exception);
  /* Sample the CLUT along its diagonal into a (MaxMap+1)-entry table. */
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetMagickPixelPacket(clut_image,clut_map+i);
    (void) InterpolateMagickPixelPacket(clut_image,clut_view,
      UndefinedInterpolatePixel,(double) i*(clut_image->columns-adjust)/MaxMap,
      (double) i*(clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Snapshot the source pixel before any channel is overwritten. */
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixelRed(clut_map+
          ScaleQuantumToMap(GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixelGreen(clut_map+
          ScaleQuantumToMap(GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixelBlue(clut_map+
          ScaleQuantumToMap(GetPixelBlue(q))));
      if ((channel & OpacityChannel) != 0)
        {
          /* Matte handling: a matte-less CLUT adjusts alpha by its
             intensity; otherwise a matte-less image gets opacity from the
             source pixel intensity; else index opacity directly. */
          if (clut_image->matte == MagickFalse)
            SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+
              ScaleQuantumToMap((Quantum) GetPixelAlpha(q))));
          else
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,ClampPixelOpacity(clut_map+
                ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel))));
            else
              SetPixelOpacity(q,ClampPixelOpacity(
                clut_map+ScaleQuantumToMap(GetPixelOpacity(q))));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t)
          GetPixelIndex(indexes+x))->index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across the parallel row loop. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClutImageChannel)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  /* Enable alpha on the target when the CLUT carried transparency. */
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
*/
static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Push brightness away from the midpoint along a sigmoid: dark colors
    become darker and light colors lighter (sign > 0), or the reverse
    (sign < 0).  Hue and saturation are preserved.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=saturation=brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  /* Clamp the adjusted brightness to the valid [0, 1] range. */
  brightness=brightness > 1.0 ? 1.0 : brightness;
  brightness=brightness < 0.0 ? 0.0 : brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* sign selects direction: +1 increases contrast, -1 reduces it. */
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL path first; fall back to the CPU path on failure. */
  status=AccelerateContrastImage(image,sharpen,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      blue,
      green,
      red;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Adjust each pixel's brightness in HSB space via Contrast(). */
      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across the parallel row loop. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by `stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% `enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const char *levels)
% MagickBooleanType ContrastStretchImageChannel(Image *image,
% const size_t channel,const double black_point,
% const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const char *levels)
{
  double
    black_point,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels into black/white points.  The points are PIXEL COUNTS in
    the range 0..columns*rows (see the documentation block above): stretch
    so that black_point pixels saturate to black and white_point marks the
    count at which pixels saturate to white.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) image->columns*image->rows;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  if ((flags & PercentValue) != 0)
    {
      /*
        Percentages are fractions of the total pixel count, not of
        QuantumRange: black/white points are histogram counts.
      */
      black_point*=(double) image->columns*image->rows/100.0;
      white_point*=(double) image->columns*image->rows/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) image->columns*image->rows-black_point;
  status=ContrastStretchImageChannel(image,DefaultChannels,black_point,
    white_point);
  return(status);
}
MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point)
{
#define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    intensity;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    white;

  QuantumPixelPacket
    *stretch_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT) && 0
  /* Call OpenCL version */
  status=AccelerateContrastStretchImageChannel(image,channel,black_point,
    white_point,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*stretch_map));
  if ((histogram == (MagickPixelPacket *) NULL) ||
      (stretch_map == (QuantumPixelPacket *) NULL))
    {
      /* Release whichever allocation succeeded before throwing. */
      if (stretch_map != (QuantumPixelPacket *) NULL)
        stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  exception=(&image->exception);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace);
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      /* Synced channels: histogram the pixel intensity once for all bands. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        Quantum
          intensity;

        intensity=ClampToQuantum(GetPixelIntensity(image,p));
        histogram[ScaleQuantumToMap(intensity)].red++;
        histogram[ScaleQuantumToMap(intensity)].green++;
        histogram[ScaleQuantumToMap(intensity)].blue++;
        histogram[ScaleQuantumToMap(intensity)].index++;
        p++;
      }
    else
      /* Otherwise histogram each selected channel independently. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
        if ((channel & GreenChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
        if ((channel & BlueChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
        if ((channel & OpacityChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        p++;
      }
  }
  /*
    Find the histogram boundaries by locating the black/white levels.
    For each channel: walk up from 0 until black_point pixels are covered,
    and down from MaxMap until (total - white_point) pixels are covered.
  */
  black.red=0.0;
  white.red=MaxRange(QuantumRange);
  if ((channel & RedChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].red;
        if (intensity > black_point)
          break;
      }
      black.red=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].red;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.red=(MagickRealType) i;
    }
  black.green=0.0;
  white.green=MaxRange(QuantumRange);
  if ((channel & GreenChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].green;
        if (intensity > black_point)
          break;
      }
      black.green=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].green;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.green=(MagickRealType) i;
    }
  black.blue=0.0;
  white.blue=MaxRange(QuantumRange);
  if ((channel & BlueChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].blue;
        if (intensity > black_point)
          break;
      }
      black.blue=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].blue;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.blue=(MagickRealType) i;
    }
  black.opacity=0.0;
  white.opacity=MaxRange(QuantumRange);
  if ((channel & OpacityChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].opacity;
        if (intensity > black_point)
          break;
      }
      black.opacity=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].opacity;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.opacity=(MagickRealType) i;
    }
  black.index=0.0;
  white.index=MaxRange(QuantumRange);
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].index;
        if (intensity > black_point)
          break;
      }
      black.index=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].index;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.index=(MagickRealType) i;
    }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping:
    values below the black level map to 0, above the white level to
    QuantumRange, and in between scale linearly.
  */
  (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & RedChannel) != 0)
      {
        if (i < (ssize_t) black.red)
          stretch_map[i].red=(Quantum) 0;
        else
          if (i > (ssize_t) white.red)
            stretch_map[i].red=QuantumRange;
          else
            if (black.red != white.red)
              stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.red)/(white.red-black.red)));
      }
    if ((channel & GreenChannel) != 0)
      {
        if (i < (ssize_t) black.green)
          stretch_map[i].green=0;
        else
          if (i > (ssize_t) white.green)
            stretch_map[i].green=QuantumRange;
          else
            if (black.green != white.green)
              stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.green)/(white.green-black.green)));
      }
    if ((channel & BlueChannel) != 0)
      {
        if (i < (ssize_t) black.blue)
          stretch_map[i].blue=0;
        else
          if (i > (ssize_t) white.blue)
            stretch_map[i].blue= QuantumRange;
          else
            if (black.blue != white.blue)
              stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.blue)/(white.blue-black.blue)));
      }
    if ((channel & OpacityChannel) != 0)
      {
        if (i < (ssize_t) black.opacity)
          stretch_map[i].opacity=0;
        else
          if (i > (ssize_t) white.opacity)
            stretch_map[i].opacity=QuantumRange;
          else
            if (black.opacity != white.opacity)
              stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.opacity)/(white.opacity-black.opacity)));
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      {
        if (i < (ssize_t) black.index)
          stretch_map[i].index=0;
        else
          if (i > (ssize_t) white.index)
            stretch_map[i].index=QuantumRange;
          else
            if (black.index != white.index)
              stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.index)/(white.index-black.index)));
      }
  }
  /*
    Stretch the image.
  */
  /* Opacity/index stretches cannot be expressed via the colormap alone. */
  if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace)))
    image->storage_class=DirectClass;
  if (image->storage_class == PseudoClass)
    {
      /*
        Stretch colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          {
            if (black.red != white.red)
              image->colormap[i].red=stretch_map[
                ScaleQuantumToMap(image->colormap[i].red)].red;
          }
        if ((channel & GreenChannel) != 0)
          {
            if (black.green != white.green)
              image->colormap[i].green=stretch_map[
                ScaleQuantumToMap(image->colormap[i].green)].green;
          }
        if ((channel & BlueChannel) != 0)
          {
            if (black.blue != white.blue)
              image->colormap[i].blue=stretch_map[
                ScaleQuantumToMap(image->colormap[i].blue)].blue;
          }
        if ((channel & OpacityChannel) != 0)
          {
            if (black.opacity != white.opacity)
              image->colormap[i].opacity=stretch_map[
                ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
          }
      }
    }
  /*
    Stretch image.
  */
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* A degenerate (black == white) channel is left unchanged. */
      if ((channel & RedChannel) != 0)
        {
          if (black.red != white.red)
            SetPixelRed(q,stretch_map[
              ScaleQuantumToMap(GetPixelRed(q))].red);
        }
      if ((channel & GreenChannel) != 0)
        {
          if (black.green != white.green)
            SetPixelGreen(q,stretch_map[
              ScaleQuantumToMap(GetPixelGreen(q))].green);
        }
      if ((channel & BlueChannel) != 0)
        {
          if (black.blue != white.blue)
            SetPixelBlue(q,stretch_map[
              ScaleQuantumToMap(GetPixelBlue(q))].blue);
        }
      if ((channel & OpacityChannel) != 0)
        {
          if (black.opacity != white.opacity)
            SetPixelOpacity(q,stretch_map[
              ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if (black.index != white.index)
            SetPixelIndex(indexes+x,stretch_map[
              ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across the parallel row loop. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastStretchImageChannel)
#endif
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhancePixel(weight) \
mean=QuantumScale*((double) GetPixelRed(r)+pixel.red)/2.0; \
distance=QuantumScale*((double) GetPixelRed(r)-pixel.red); \
distance_squared=(4.0+mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelGreen(r)+pixel.green)/2.0; \
distance=QuantumScale*((double) GetPixelGreen(r)-pixel.green); \
distance_squared+=(7.0-mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelBlue(r)+pixel.blue)/2.0; \
distance=QuantumScale*((double) GetPixelBlue(r)-pixel.blue); \
distance_squared+=(5.0-mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelOpacity(r)+pixel.opacity)/2.0; \
distance=QuantumScale*((double) GetPixelOpacity(r)-pixel.opacity); \
distance_squared+=(5.0-mean)*distance*distance; \
if (distance_squared < 0.069) \
{ \
aggregate.red+=(weight)*GetPixelRed(r); \
aggregate.green+=(weight)*GetPixelGreen(r); \
aggregate.blue+=(weight)*GetPixelBlue(r); \
aggregate.opacity+=(weight)*GetPixelOpacity(r); \
total_weight+=(weight); \
} \
r++;
#define EnhanceImageTag "Enhance/Image"
CacheView
*enhance_view,
*image_view;
Image
*enhance_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ssize_t
y;
/*
Initialize enhanced image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((image->columns < 5) || (image->rows < 5))
return((Image *) NULL);
enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (enhance_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
{
InheritException(exception,&enhance_image->exception);
enhance_image=DestroyImage(enhance_image);
return((Image *) NULL);
}
/*
Enhance image.
*/
status=MagickTrue;
progress=0;
(void) ResetMagickMemory(&zero,0,sizeof(zero));
image_view=AcquireAuthenticCacheView(image,exception);
enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,enhance_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
/*
Read another scan line.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
distance,
distance_squared,
mean,
total_weight;
MagickPixelPacket
aggregate;
PixelPacket
pixel;
register const PixelPacket
*magick_restrict r;
/*
Compute weighted average of target pixel color components.
*/
aggregate=zero;
total_weight=0.0;
r=p+2*(image->columns+4)+2;
pixel=(*r);
r=p;
EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
EnhancePixel(8.0); EnhancePixel(5.0);
r=p+(image->columns+4);
EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
EnhancePixel(20.0); EnhancePixel(8.0);
r=p+2*(image->columns+4);
EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
EnhancePixel(40.0); EnhancePixel(10.0);
r=p+3*(image->columns+4);
EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
EnhancePixel(20.0); EnhancePixel(8.0);
r=p+4*(image->columns+4);
EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
EnhancePixel(8.0); EnhancePixel(5.0);
SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight);
SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/total_weight);
SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight);
SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/total_weight);
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EnhanceImage)
#endif
proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
enhance_view=DestroyCacheView(enhance_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
enhance_image=DestroyImage(enhance_image);
return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image)
% MagickBooleanType EqualizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
/*
  EqualizeImage() is a convenience wrapper: it applies histogram
  equalization to the default channels of the image by delegating to
  EqualizeImageChannel().
*/
MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  return(EqualizeImageChannel(image,DefaultChannels));
}
/*
  EqualizeImageChannel() applies histogram equalization to the selected
  channels of the image: it builds a per-channel histogram, integrates it
  into a cumulative map, and remaps each pixel through the resulting
  equalization lookup table.  Returns MagickTrue on success.

  Note: when SyncChannels is set, only the .red field of the histogram /
  map / equalize_map entries is used — it carries the combined pixel
  intensity for all channels.
*/
MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
  const ChannelType channel)
{
#define EqualizeImageTag "Equalize/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    intensity,
    *map,
    white;

  QuantumPixelPacket
    *equalize_map;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Call OpenCL version; fall through to the CPU path only on failure. */
  status=AccelerateEqualizeImage(image,channel,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  /*
    Allocate and initialize histogram arrays.
  */
  equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*equalize_map));
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
  if ((equalize_map == (QuantumPixelPacket *) NULL) ||
      (histogram == (MagickPixelPacket *) NULL) ||
      (map == (MagickPixelPacket *) NULL))
    {
      /* Release whichever allocations succeeded before throwing. */
      if (map != (MagickPixelPacket *) NULL)
        map=(MagickPixelPacket *) RelinquishMagickMemory(map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      if (equalize_map != (QuantumPixelPacket *) NULL)
        equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(
          equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      /* Sync mode: one combined-intensity histogram, stored in .red. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType intensity=GetPixelIntensity(image,p);
        histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++;
        p++;
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
        if ((channel & GreenChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
        if ((channel & BlueChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
        if ((channel & OpacityChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        p++;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.
  */
  (void) ResetMagickMemory(&intensity,0,sizeof(intensity));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        intensity.red+=histogram[i].red;
        map[i]=intensity;
        continue;
      }
    if ((channel & RedChannel) != 0)
      intensity.red+=histogram[i].red;
    if ((channel & GreenChannel) != 0)
      intensity.green+=histogram[i].green;
    if ((channel & BlueChannel) != 0)
      intensity.blue+=histogram[i].blue;
    if ((channel & OpacityChannel) != 0)
      intensity.opacity+=histogram[i].opacity;
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      intensity.index+=histogram[i].index;
    map[i]=intensity;
  }
  /* black/white are the cumulative extremes used to normalize the map. */
  black=map[0];
  white=map[(int) MaxMap];
  (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        if (white.red != black.red)
          equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
            (map[i].red-black.red))/(white.red-black.red)));
        continue;
      }
    if (((channel & RedChannel) != 0) && (white.red != black.red))
      equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].red-black.red))/(white.red-black.red)));
    if (((channel & GreenChannel) != 0) && (white.green != black.green))
      equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].green-black.green))/(white.green-black.green)));
    if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
      equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].blue-black.blue))/(white.blue-black.blue)));
    if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
      equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].opacity-black.opacity))/(white.opacity-black.opacity)));
    if ((((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace)) &&
        (white.index != black.index))
      equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].index-black.index))/(white.index-black.index)));
  }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  map=(MagickPixelPacket *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      /*
        Equalize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & SyncChannels) != 0)
          {
            /* Sync mode populated only .red; use it for every component. */
            if (white.red != black.red)
              {
                image->colormap[i].red=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].red)].red;
                image->colormap[i].green=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].green)].red;
                image->colormap[i].blue=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].blue)].red;
                image->colormap[i].opacity=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].opacity)].red;
              }
            continue;
          }
        if (((channel & RedChannel) != 0) && (white.red != black.red))
          image->colormap[i].red=equalize_map[
            ScaleQuantumToMap(image->colormap[i].red)].red;
        if (((channel & GreenChannel) != 0) && (white.green != black.green))
          image->colormap[i].green=equalize_map[
            ScaleQuantumToMap(image->colormap[i].green)].green;
        if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
          image->colormap[i].blue=equalize_map[
            ScaleQuantumToMap(image->colormap[i].blue)].blue;
        if (((channel & OpacityChannel) != 0) &&
            (white.opacity != black.opacity))
          image->colormap[i].opacity=equalize_map[
            ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
      }
    }
  /*
    Equalize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & SyncChannels) != 0)
        {
          /* Sync mode: remap every component through the .red table. */
          if (white.red != black.red)
            {
              SetPixelRed(q,equalize_map[
                ScaleQuantumToMap(GetPixelRed(q))].red);
              SetPixelGreen(q,equalize_map[
                ScaleQuantumToMap(GetPixelGreen(q))].red);
              SetPixelBlue(q,equalize_map[
                ScaleQuantumToMap(GetPixelBlue(q))].red);
              SetPixelOpacity(q,equalize_map[
                ScaleQuantumToMap(GetPixelOpacity(q))].red);
              if (image->colorspace == CMYKColorspace)
                SetPixelIndex(indexes+x,equalize_map[
                  ScaleQuantumToMap(GetPixelIndex(indexes+x))].red);
            }
          q++;
          continue;
        }
      if (((channel & RedChannel) != 0) && (white.red != black.red))
        SetPixelRed(q,equalize_map[
          ScaleQuantumToMap(GetPixelRed(q))].red);
      if (((channel & GreenChannel) != 0) && (white.green != black.green))
        SetPixelGreen(q,equalize_map[
          ScaleQuantumToMap(GetPixelGreen(q))].green);
      if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
        SetPixelBlue(q,equalize_map[
          ScaleQuantumToMap(GetPixelBlue(q))].blue);
      if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
        SetPixelOpacity(q,equalize_map[
          ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
      if ((((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace)) &&
          (white.index != black.index))
        SetPixelIndex(indexes+x,equalize_map[
          ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EqualizeImageChannel)
#endif
        proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const char *level)
% MagickBooleanType GammaImageChannel(Image *image,
% const ChannelType channel,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
% o gamma: the image gamma.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
/*
  GammaImage() parses a gamma level string (e.g. "1.6,1.2,1.0") and
  gamma-corrects the image.  Missing green/blue values default to the
  red value; equal values are applied in one combined-channel pass.
  Returns MagickTrue on success, MagickFalse on a NULL level string or
  when every per-channel correction fails.
*/
MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  /* rho is the red gamma; sigma and xi fall back to it when absent. */
  gamma.red=geometry_info.rho;
  gamma.green=((flags & SigmaValue) != 0) ? geometry_info.sigma : gamma.red;
  gamma.blue=((flags & XiValue) != 0) ? geometry_info.xi : gamma.red;
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);  /* identity correction: nothing to do */
  if ((gamma.red == gamma.green) && (gamma.green == gamma.blue))
    status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel |
      BlueChannel),(double) gamma.red);
  else
    {
      /* Distinct gammas: correct each channel independently. */
      status=GammaImageChannel(image,RedChannel,(double) gamma.red);
      status&=GammaImageChannel(image,GreenChannel,(double) gamma.green);
      status&=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
  GammaImageChannel() gamma-corrects the selected channels of the image.
  In the non-HDRI build a precomputed lookup table (gamma_map) maps each
  quantum through pow(v,1/gamma); in the HDRI build the correction is
  computed per pixel with gamma_pow().  A gamma of 1.0 is a no-op; a
  gamma of 0.0 leaves the lookup table all zeros, blanking the channel.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaCorrectImageTag "GammaCorrect/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[ScaleQuantumToMap(
            image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[ScaleQuantumToMap(
            image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[ScaleQuantumToMap(
            image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            /* With matte, opacity is corrected in alpha space. */
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[ScaleQuantumToMap(
                image->colormap[i].opacity)];
            else
              image->colormap[i].opacity=QuantumRange-gamma_map[
                ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
#else
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].red,1.0/gamma);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].green,1.0/gamma);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].blue,1.0/gamma);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale*
                image->colormap[i].opacity,1.0/gamma);
            else
              image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow(
                QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/
                gamma);
          }
#endif
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
#else
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
            1.0/gamma));
          SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q),
            1.0/gamma));
          SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
            1.0/gamma));
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
              1.0/gamma));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*
              GetPixelGreen(q),1.0/gamma));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
              1.0/gamma));
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelOpacity(q),1.0/gamma));
              else
                SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelAlpha(q),1.0/gamma));
            }
        }
#endif
      q++;
    }
    /* CMYK black channel is corrected in a separate pass over indexes. */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GammaImageChannel)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* Track the cumulative gamma applied to the image. */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the colors in the reference image to gray.
%
% The format of the GrayscaleImageChannel method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
/*
  GrayscaleImage() converts the image colors to gray using the requested
  pixel-intensity method, then marks the image as grayscale and switches
  its colorspace.  Returns MagickTrue on success.

  Fix: the OpenCL fast path previously returned unconditionally — the
  guard `if (status != MagickFalse)` was missing, so a failed OpenCL
  acceleration skipped the CPU fallback and returned an unconverted
  image.  The guard is restored so the CPU path runs on failure.
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method)
{
#define GrayscaleImageTag "Grayscale/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      if (SyncImage(image) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Grayscale image.
  */
  /* call opencl version; fall through to the CPU path on failure */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  status=AccelerateGrayscaleImage(image,method,&image->exception);
  if (status != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace));
      return(SetImageColorspace(image,GRAYColorspace));
    }
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        intensity,
        red;

      red=(MagickRealType) q->red;
      green=(MagickRealType) q->green;
      blue=(MagickRealType) q->blue;
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/(3.0*QuantumRange));
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* Luma methods expect non-linear (sRGB) components. */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* Luminance methods expect linear components. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(q,ClampToQuantum(intensity));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GrayscaleImageChannel)
#endif
        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace));
  return(SetImageColorspace(image,GRAYColorspace));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image)
% MagickBooleanType HaldClutImageChannel(Image *image,
% const ChannelType channel,Image *hald_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
/*
  HaldClutImage() is a convenience wrapper: it applies the Hald color
  lookup table to the default channels of the image by delegating to
  HaldClutImageChannel().
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image)
{
  return(HaldClutImageChannel(image,DefaultChannels,hald_image));
}
/*
  HaldClutImageChannel() applies a Hald color lookup table (a 3-D color
  cube flattened into a 2-D image) to the selected channels of the image.
  Each pixel is mapped into the cube and trilinearly interpolated from
  the Hald image.  Returns MagickTrue on success.

  Fixes: (1) the CMYK index queue was fetched from hald_view but is
  written for pixels of `image` — it must come from image_view;
  (2) hald_image is const and only read, so a *virtual* cache view is
  acquired for it instead of an authentic one.
*/
MagickExport MagickBooleanType HaldClutImageChannel(Image *image,
  const ChannelType channel,const Image *hald_image)
{
#define HaldClutImageTag "Clut/Image"

  typedef struct _HaldInfo
  {
    MagickRealType
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  /* Recover the cube level from the Hald image dimensions. */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetMagickPixelPacket(hald_image,&zero);
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  /* hald_image is read-only: a virtual view suffices and accepts const. */
  hald_view=AcquireVirtualCacheView(hald_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,hald_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      offset;

    HaldInfo
      point;

    MagickPixelPacket
      pixel,
      pixel1,
      pixel2,
      pixel3,
      pixel4;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Indexes belong to the destination image's row, not the Hald CLUT. */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    pixel1=zero;
    pixel2=zero;
    pixel3=zero;
    pixel4=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Locate the pixel inside the color cube. */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(q);
      offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z));
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /* Trilinear interpolation: two bilinear lookups blended along z. */
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel3);
      offset+=cube_size;
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel4);
      MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4,
        pixel4.opacity,point.z,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(pixel.index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HaldClutImageChannel)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImageChannel() and LevelizeImageChannel(), below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o levels: Specify the levels where the black and white points have the
% range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
% A '!' flag inverts the re-mapping.
%
*/
/*
  LevelImage() parses a levels string ("black,white,gamma", e.g.
  "10x90%+2") and applies the level (or, with a '!' flag, levelize)
  operation to the default channels.  Returns MagickTrue on success,
  MagickFalse on a NULL levels string.

  Fix: percent values were scaled by image->columns*rows/100.0 (a pixel
  count), but black/white points are pixel intensities in the range
  0-QuantumRange per this function's contract — percent must scale by
  QuantumRange/100.0.
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      /* Percent of the quantum range, not of the pixel count. */
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  /* A '!' flag (AspectValue) inverts the mapping: levelize instead. */
  if ((flags & AspectValue ) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() applies the normal level operation to the image, spreading
% out the values between the black and white points over the entire range of
% values. Gamma correction is also applied after the values has been mapped.
%
% It is typically used to improve image contrast, or to provide a controlled
% linear threshold for the image. If the black and white points are set to
% the minimum and maximum values found in the image, the image can be
% normalized. or by swapping black and white values, negate the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma)
% MagickBooleanType LevelImageChannel(Image *image,
% const ChannelType channel,const double black_point,
% const double white_point,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level which is to be mapped to zero (black)
%
% o white_point: The level which is to be mapped to QuantiumRange (white)
%
% o gamma: adjust gamma by this factor before mapping values.
% use 1.0 for purely linear stretching of image color values
%
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    scale;

  /*
    Linearly map [black_point,white_point] onto the full quantum range, then
    apply gamma correction.  A (near) degenerate range leaves the pixel
    unchanged to avoid dividing by zero.
  */
  if (fabs(white_point-black_point) < MagickEpsilon)
    return(pixel);
  scale=1.0/(white_point-black_point);
  return(QuantumRange*gamma_pow(scale*((double) pixel-black_point),1.0/gamma));
}
MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].blue));
      /* Opacity is leveled in alpha space (QuantumRange-opacity), then
         converted back, hence the double subtraction. */
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image.  Rows are processed independently in parallel; any failure
    flips the shared status flag and remaining rows are skipped.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip remaining work. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelAlpha(q))));
      /* The index channel is only leveled for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize progress updates across threads. */
        #pragma omp critical (MagickCore_LevelImageChannel)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* NOTE(review): ClampImage() looks redundant after the per-pixel
     ClampToQuantum() calls above -- presumably needed for HDRI builds;
     confirm before removing. */
  (void) ClampImage(image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImageChannel() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImageChannel() can be called by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used, for example, to de-contrast a greyscale image to the exact
% levels specified.  Or, by using specific levels for each channel of an image
% you can convert a gray-scale image to any linear color gradient, according
% to those levels.
%
% The format of the LevelizeImageChannel method is:
%
% MagickBooleanType LevelizeImageChannel(Image *image,
% const ChannelType channel,const double black_point,
% const double white_point,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  /*
    Convenience wrapper: apply the reversed level operation to the default
    channels of the image.
  */
  return(LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma));
}
MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag "Levelize/Image"
/* Reverse of LevelPixel(): gamma-correct the normalized value, then compress
   it into the [black_point,white_point] range. */
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      /* Opacity is levelized in alpha space (QuantumRange-opacity). */
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue(
          QuantumRange-image->colormap[i].opacity));
    }
  /*
    Level image.  Rows are processed independently in parallel; any failure
    flips the shared status flag and remaining rows are skipped.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip remaining work. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      /* The index channel is only levelized for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize progress updates across threads. */
        #pragma omp critical (MagickCore_LevelizeImageChannel)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColor() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is, any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelColorsImageChannel method is:
%
% MagickBooleanType LevelColorsImage(Image *image,
% const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
% MagickBooleanType LevelColorsImageChannel(Image *image,
% const ChannelType channel,const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
*/
MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  /*
    Convenience wrapper: level the default channels between the given colors.
  */
  return(LevelColorsImageChannel(image,DefaultChannels,black_color,white_color,
    invert));
}
MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickStatusType
    status;

  /*
    Level (or levelize, when invert is true) each requested channel between
    the corresponding components of the black and white reference colors.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A gray image cannot represent non-gray level colors: promote it to sRGB
    when either reference color lies outside a gray colorspace.  (Fix: the
    inner tests compared against != MagickFalse, which inverted the intent
    and promoted only when the colors were already gray.)
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      /*
        Spread each channel out between its black and white color components.
      */
      if ((channel & RedChannel) != 0)
        status&=LevelImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  else
    {
      /*
        Reverse direction: compress each channel into the range given by its
        black and white color components.
      */
      if ((channel & RedChannel) != 0)
        status&=LevelizeImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelizeImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelizeImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelizeImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelizeImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point)
{
#define LinearStretchImageTag "LinearStretch/Image"

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    *histogram,
    intensity;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  if (histogram == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram: bucket every pixel by its (clamped) intensity.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,p)))]++;
      p++;
    }
  }
  /*
    Find the histogram boundaries by locating the black and white point
    levels.  black_point/white_point act as cumulative pixel-count
    thresholds: walk inward from each end until the running sum reaches them.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
  /* Level the remaining pixels between the located boundary levels. */
  status=LevelImageChannel(image,DefaultChannels,(double)
    ScaleMapToQuantum(black),(double) ScaleMapToQuantum(white),1.0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and
% hue.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    c,
    h,
    l;

  /*
    Modulate the pixel in HCL space: chroma and luma are scaled by their
    percentages; hue is rotated by (percent_hue-100)/200 of a full turn.
  */
  ConvertRGBToHCL(*red,*green,*blue,&h,&c,&l);
  l*=0.01*percent_luma;
  c*=0.01*percent_chroma;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLToRGB(h,c,l,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    c,
    h,
    l;

  /*
    Modulate the pixel in HCLp space: chroma and luma are scaled by their
    percentages; hue is rotated by (percent_hue-100)/200 of a full turn.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&h,&c,&l);
  l*=0.01*percent_luma;
  c*=0.01*percent_chroma;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLpToRGB(h,c,l,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    b,
    h,
    s;

  /*
    Modulate the pixel in HSB space: brightness and saturation are scaled by
    their percentages; hue is rotated by (percent_hue-100)/200 of a turn.
  */
  ConvertRGBToHSB(*red,*green,*blue,&h,&s,&b);
  b*=0.01*percent_brightness;
  s*=0.01*percent_saturation;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSBToRGB(h,s,b,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    h,
    i,
    s;

  /*
    Modulate the pixel in HSI space: intensity and saturation are scaled by
    their percentages; hue is rotated by (percent_hue-100)/200 of a turn.
  */
  ConvertRGBToHSI(*red,*green,*blue,&h,&s,&i);
  i*=0.01*percent_intensity;
  s*=0.01*percent_saturation;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSIToRGB(h,s,i,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    h,
    l,
    s;

  /*
    Modulate the pixel in HSL space: lightness and saturation are scaled by
    their percentages; hue is rotated by (percent_hue-100)/200 of a turn.
  */
  ConvertRGBToHSL(*red,*green,*blue,&h,&s,&l);
  l*=0.01*percent_lightness;
  s*=0.01*percent_saturation;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSLToRGB(h,s,l,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    h,
    s,
    v;

  /*
    Modulate the pixel in HSV space: value and saturation are scaled by
    their percentages; hue is rotated by (percent_hue-100)/200 of a turn.
  */
  ConvertRGBToHSV(*red,*green,*blue,&h,&s,&v);
  v*=0.01*percent_value;
  s*=0.01*percent_saturation;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSVToRGB(h,s,v,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    b,
    h,
    w;

  /*
    Modulate the pixel in HWB space: whiteness and blackness are scaled by
    their percentages; hue is rotated by (percent_hue-100)/200 of a turn.
  */
  ConvertRGBToHWB(*red,*green,*blue,&h,&w,&b);
  w*=0.01*percent_whiteness;
  b*=0.01*percent_blackness;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHWBToRGB(h,w,b,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    c,
    h,
    l;

  /*
    Modulate the pixel in LCH(ab) space: luma and chroma are scaled by their
    percentages; hue is rotated by (percent_hue-100)/200 of a full turn.
  */
  ConvertRGBToLCHab(*red,*green,*blue,&l,&c,&h);
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  c*=0.01*percent_chroma;
  l*=0.01*percent_luma;
  ConvertLCHabToRGB(l,c,h,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    c,
    h,
    l;

  /*
    Modulate the pixel in LCH(uv) space: luma and chroma are scaled by their
    percentages; hue is rotated by (percent_hue-100)/200 of a full turn.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,&l,&c,&h);
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  c*=0.01*percent_chroma;
  l*=0.01*percent_luma;
  ConvertLCHuvToRGB(l,c,h,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate)
{
#define ModulateImageTag "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.  Fixes over the previous revision: the pixel
    loop's switch was missing the HSIColorspace case (HSI requests silently
    fell through to the HSL default), and the two switches disagreed on
    LCHColorspace (colormap loop used LCHab, pixel loop used LCHuv).  Both
    switches are now identical, with LCH grouped with LCHuv.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Parse "brightness[,saturation[,hue]]"; omitted values default to 100%.
  */
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      Quantum
        blue,
        green,
        red;

      /*
        Modulate image colormap.
      */
      red=image->colormap[i].red;
      green=image->colormap[i].green;
      blue=image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the accelerated OpenCL implementation first. */
  status=AccelerateModulateImage(image,percent_brightness,percent_hue,
    percent_saturation,colorspace,&image->exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        blue,
        green,
        red;

      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize progress updates across threads. */
        #pragma omp critical (MagickCore_ModulateImage)
#endif
        proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImageChannel method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale)
% MagickBooleanType NegateImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType grayscale)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale)
{
  /*
    Convenience wrapper: negate the default channels of the image.
  */
  return(NegateImageChannel(image,DefaultChannels,grayscale));
}
MagickExport MagickBooleanType NegateImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType grayscale)
{
#define NegateImageTag "Negate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Negate the selected channels of the image.  When grayscale is set, only
    pixels whose red, green, and blue components are equal are negated.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Negate colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        /* In grayscale mode, skip entries that are not actually gray. */
        if (grayscale != MagickFalse)
          if ((image->colormap[i].red != image->colormap[i].green) ||
              (image->colormap[i].green != image->colormap[i].blue))
            continue;
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  if (grayscale != MagickFalse)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Only gray pixels (r == g == b) are negated in this mode. */
          if ((GetPixelRed(q) != GetPixelGreen(q)) ||
              (GetPixelGreen(q) != GetPixelBlue(q)))
            {
              q++;
              continue;
            }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange-GetPixelRed(q));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_NegateImageChannel)
#endif
            proceed=SetImageProgress(image,NegateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        Fix: propagate any pixel-cache failure to the caller; this path
        previously returned MagickTrue unconditionally, masking errors.
      */
      return(status);
    }
  /*
    Negate image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* Fast path: all default channels are negated unconditionally. */
    if (channel == DefaultChannels)
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
        SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x));
        SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x));
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x));
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x));
        if ((channel & OpacityChannel) != 0)
          SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x));
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x));
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_NegateImageChannel)
#endif
        proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 2 percent of all pixels to black and the brightest
% 1 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image)
% MagickBooleanType NormalizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  /*
    Convenience wrapper: normalize the default channels of the image.
  */
  return(NormalizeImageChannel(image,DefaultChannels));
}
MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    pixels;

  /*
    Contrast-stretch using fixed cumulative pixel-count thresholds: 0.15% of
    pixels at the dark end, and a white threshold at 99.95% of the total.
    NOTE(review): the method comment above advertises 2%/1%; the constants
    here say otherwise -- confirm which is intended.
  */
  pixels=(double) image->columns*image->rows;
  return(ContrastStretchImageChannel(image,channel,pixels*0.0015,
    pixels*0.9995));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels)
% MagickBooleanType SigmoidalContrastImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType sharpen,
% const double contrast,const double midpoint)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
*/
/*
ImageMagick 7 has a version of this function which does not use LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/*
  Right inverse of ScaledSigmoidal (used for +sigmoidal-contrast).  The
  raw sigmoidal value is clamped to the open interval of the underlying
  curve so out-of-gamut / HDRI inputs cannot produce NaN or Inf.
  NOTE: the '(' opened just before the #if is closed inside each branch,
  so each preprocessor path forms a complete, balanced initializer plus
  its own return statement — delicate, but valid in both configurations.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  /* Undo the affine scaling: recover the raw sigmoidal ordinate. */
  const double argument=(sig1-sig0)*x+sig0;
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      /* tanh-based curve: clamp into (-1,1) before atanh. */
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b+(2.0/a)*atanh(clamped));
#else
      /* logistic curve: clamp into (0,1) before the logit below. */
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
/*
  SigmoidalContrastImage(): parse a "contrast[,midpoint[%]]" geometry
  string and delegate to SigmoidalContrastImageChannel() on the default
  channels.  A missing midpoint defaults to mid-gray (QuantumRange/2);
  a '%' suffix rescales the midpoint from percent to quantum units.
*/
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const char *levels)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  flags=ParseGeometry(levels,&geometry_info);
  if ((flags & SigmaValue) == 0)
    geometry_info.sigma=1.0*QuantumRange/2.0;
  if ((flags & PercentValue) != 0)
    geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0;
  return(SigmoidalContrastImageChannel(image,DefaultChannels,sharpen,
    geometry_info.rho,geometry_info.sigma));
}
/*
  SigmoidalContrastImageChannel(): apply sigmoidal contrast to the
  selected channels using a precomputed lookup table with MaxMap+1
  entries.  sharpen selects the forward (ScaledSigmoidal) or inverse
  (InverseScaledSigmoidal) transfer curve.  Both the colormap (for
  PseudoClass images) and the pixels are remapped.  Returns MagickTrue
  on success, MagickFalse if any row of pixels could not be accessed.
*/
MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType sharpen,
  const double contrast,const double midpoint)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *sigmoidal_map;     /* LUT: quantum scaled to [0,MaxMap] -> quantum */

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Side effect: clamps values unless contrast<MagickEpsilon, in which
    case nothing is done.
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Allocate and initialize sigmoidal maps.
  */
  /* NOTE(review): these asserts run after the early return above, so a
     NULL image combined with a tiny contrast is not diagnosed here. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*sigmoidal_map));
  if (sigmoidal_map == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map));
  /* Build the transfer LUT: forward curve when sharpening, right-inverse
     otherwise; midpoint is rescaled to [0,1] via QuantumScale. */
  if (sharpen != MagickFalse)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/
        MaxMap)));
  else
    for (i=0; i <= (ssize_t) MaxMap; i++)
      sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (
        MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/
        MaxMap)));
  /*
    Sigmoidal-contrast enhance colormap.
  */
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].red)]);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].green)]);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].blue)]);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].opacity)]);
    }
  /*
    Sigmoidal-contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* Another row already failed: skip remaining rows cheaply. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelRed(q))]));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelGreen(q))]));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelBlue(q))]));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelOpacity(q))]));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ must be serialized across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SigmoidalContrastImageChannel)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map);
  return(status);
}
|
jacobi_avx512.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <immintrin.h>
#define REAL float
/* Wall-clock timestamp in milliseconds.
 * Uses the standard C11 timespec_get() instead of the obsolete ftime()
 * interface (removed from POSIX.1-2008); this also improves resolution
 * from milliseconds to nanoseconds.  Returns 0.0 if no clock is
 * available. */
static double read_timer_ms() {
    struct timespec ts;
    if (timespec_get(&ts, TIME_UTC) != TIME_UTC)
        return 0.0; /* clock unavailable: degrade gracefully */
    return (double) ts.tv_sec * 1000.0 + (double) ts.tv_nsec / 1.0e6;
}
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successice over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define DEFAULT_DIMSIZE 256
/* Dump an n-by-m row-major matrix A to stdout under a title, labelling
 * each entry as name[row][col]:value.  Output format is unchanged. */
void print_array(char *title, char *name, REAL *A, int n, int m) {
    printf("%s:\n", title);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < m; col++)
            printf("%s[%d][%d]:%f ", name, row, col, A[row * m + col]);
        printf("\n");
    }
    printf("\n");
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
 ******************************************************
 * Initializes data
 * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
 *
 * Sets the grid spacings *dx, *dy for the domain [-1,1]x[-1,1],
 * zeroes the solution u, and fills the right-hand side f from the
 * assumed exact solution.
 ******************************************************/
void initialize(int n, int m, REAL alpha, REAL *dx, REAL *dy, REAL *u_p, REAL *f_p) {
    int i;
    int j;
    int xx;
    int yy;
    /* View the flat buffers as n-by-m VLAs for 2-D indexing. */
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    //double PI=3.1415926;
    *dx = (2.0 / (n - 1));
    *dy = (2.0 / (m - 1));
    /* Initialize initial condition and RHS */
    //#pragma omp parallel for private(xx,yy,j,i)
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            /* NOTE(review): xx/yy are truncated to int, so the continuous
               coordinates -1+dx*(i-1) collapse to small integers.  This
               matches the widely circulated C translation of the KAI
               Jacobi benchmark, but looks like accidental truncation —
               confirm against the Fortran original before changing. */
            xx = ((int) (-1.0 + (*dx * (i - 1))));
            yy = ((int) (-1.0 + (*dy * (j - 1))));
            u[i][j] = 0.0;
            f[i][j] = (((((-1.0 * alpha) * (1.0 - (xx * xx)))
                         * (1.0 - (yy * yy))) - (2.0 * (1.0 - (xx * xx))))
                       - (2.0 * (1.0 - (yy * yy))));
        }
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
 implicit none
 ************************************************************
 * Checks error between numerical and exact solution
 *
 * Computes the RMS-style difference between u and the assumed exact
 * solution (1-x^2)*(1-y^2) over the whole grid and prints it.
 * NOTE(review): this file did not originally include <math.h>, yet
 * sqrt() is called below — an implicit declaration, which is an error
 * in C99 and later.
 ************************************************************/
void error_check(int n, int m, REAL alpha, REAL dx, REAL dy, REAL *u_p, REAL *f_p) {
    int i;
    int j;
    REAL xx;
    REAL yy;
    REAL temp;
    REAL error;
    error = 0.0;
    /* View the flat buffers as n-by-m VLAs for 2-D indexing. */
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    //#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            xx = (-1.0 + (dx * (i - 1)));
            yy = (-1.0 + (dy * (j - 1)));
            temp = (u[i][j] - ((1.0 - (xx * xx)) * (1.0 - (yy * yy))));
            error = (error + (temp * temp));
        }
    error = (sqrt(error) / (n * m));
    printf("Solution Error: %2.6g\n", error);
}
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);
/* Driver: parse optional positional arguments (n m alpha tol relax mits),
 * initialize the grid, run the sequential and the vectorized solver on
 * identical inputs, report timing/MFLOPS for both, and verify the
 * sequential result against the exact solution.
 * Returns 0 on success, 1 on allocation failure. */
int main(int argc, char *argv[]) {
    int n = DEFAULT_DIMSIZE;
    int m = DEFAULT_DIMSIZE;
    REAL alpha = 0.0543;
    REAL tol = 0.0000000001;
    REAL relax = 1.0;
    int mits = 5000;
    /*fprintf(stderr, "Usage: jacobi [<n> <m> <alpha> <tol> <relax> <mits>]\n");
    fprintf(stderr, "\tn - grid dimension in x direction, default: %d\n", n);
    fprintf(stderr, "\tm - grid dimension in y direction, default: n if provided or %d\n", m);
    fprintf(stderr, "\talpha - Helmholtz constant (always greater than 0.0), default: %g\n", alpha);
    fprintf(stderr, "\ttol - error tolerance for iterative solver, default: %g\n", tol);
    fprintf(stderr, "\trelax - Successice over relaxation parameter, default: %g\n", relax);
    fprintf(stderr, "\tmits - Maximum iterations for iterative solver, default: %d\n", mits);*/
    /* Fall-through switch replaces the original duplicated if/else-if
     * ladder; unsupported argument counts keep the defaults, exactly as
     * before. */
    switch (argc > 7 ? 0 : argc) {
    case 7:
        sscanf(argv[6], "%d", &mits);  /* fallthrough */
    case 6:
        sscanf(argv[5], "%g", &relax); /* fallthrough */
    case 5:
        sscanf(argv[4], "%g", &tol);   /* fallthrough */
    case 4:
        sscanf(argv[3], "%g", &alpha); /* fallthrough */
    case 3:
        sscanf(argv[2], "%d", &m);
        sscanf(argv[1], "%d", &n);
        break;
    case 2:
        sscanf(argv[1], "%d", &n);
        m = n; /* square grid when only n is given */
        break;
    default:
        /* no arguments (or too many): keep defaults */
        break;
    }
    printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
    printf("------------------------------------------------------------------------------------------------------\n");
    /** init the array */
    REAL *u = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL *uomp = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL *f = (REAL *) malloc(sizeof(REAL) * n * m);
    if (u == NULL || uomp == NULL || f == NULL) {
        fprintf(stderr, "memory allocation failed\n");
        free(u);
        free(uomp);
        free(f);
        return 1;
    }
    REAL dx; /* grid spacing in x direction */
    REAL dy; /* grid spacing in y direction */
    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(REAL) * n * m); /* both solvers start from the same state */
    double elapsed = read_timer_ms();
    jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("seq elasped time(ms): %4f\n", elapsed);
    /* 13 flops per interior point per iteration */
    double mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);
    puts("================");
    elapsed = read_timer_ms();
    jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("OpenMP elasped time(ms): %4f\n", elapsed);
    mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);
    //print_array("Sequential Run", "u",(REAL*)u, n, m);
    /* NOTE(review): only the sequential result is verified; the uomp
     * result is timed but never checked. */
    error_check(n, m, alpha, dx, dy, u, f);
    free(u);
    free(f);
    free(uomp);
    return 0;
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* mits Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
/* Sequential Jacobi solver for the Helmholtz discretization (reference
 * implementation for jacobi_omp).  Iterates until mits iterations or
 * until the residual norm drops to tol; u is updated in place.
 * NOTE(review): uold is a VLA of n*m REALs on the stack — fine for the
 * default 256x256 (256 KiB of float), but large grids will overflow the
 * stack; confirm limits before raising n/m. */
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
    int i, j, k;
    REAL error;
    REAL ax;
    REAL ay;
    REAL b;
    REAL resid;
    REAL uold[n][m];
    /* View the flat buffers as n-by-m VLAs for 2-D indexing. */
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    /*
     * Initialize coefficients */
    /* X-direction coef */
    ax = (1.0 / (dx * dx));
    /* Y-direction coef */
    ay = (1.0 / (dy * dy));
    /* Central coeff */
    b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);
    error = (10.0 * tol); /* force at least one iteration */
    k = 1;
    while ((k <= mits) && (error > tol)) {
        error = 0.0;
        /* Copy new solution into old */
        for (i = 0; i < n; i++)
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];
        /* Relax every interior point from the 5-point stencil of uold. */
        for (i = 1; i < (n - 1); i++)
            for (j = 1; j < (m - 1); j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) +
                         b * uold[i][j] - f[i][j]) / b;
                //printf("i: %d, j: %d, resid: %f\n", i, j, resid);
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        /* Error check */
        //if (k % 500 == 0)
        // printf("Finished %d iteration with error: %g\n", k, error);
        error = sqrt(error) / (n * m);
        k = k + 1;
    } /* End iteration loop */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
}
/* AVX-512 Jacobi solver; same contract as jacobi_seq (u updated in place,
 * iterates until mits or error <= tol).
 *
 * Fixes relative to the previous version:
 *  - resid now matches the sequential formula
 *        resid = (ax*(up+dn) + ay*(lf+rt) + b*c - f) / b
 *    (the old code divided only part of the sum by b and applied ay to
 *    the wrong terms);
 *  - the error accumulator is a true running vector sum reduced with
 *    _mm512_reduce_add_ps, instead of being re-seeded from the stale
 *    scalar `error` on every j iteration and summed with a wrong
 *    lane pick ([0]+[6]);
 *  - the vector loop only runs while a full 16-lane store stays inside
 *    the row interior; a scalar tail handles the remainder, so rows
 *    whose interior width is not a multiple of 16 no longer write past
 *    the row boundary;
 *  - the u -> uold copy is a single memcpy (the old 16-wide copy loop
 *    over-read/wrote when m was not a multiple of 16);
 *  - the workspace malloc is checked.
 *
 * NOTE(review): despite the name, this function contains no OpenMP
 * pragmas — it is vectorized, not threaded. */
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
    int i, j, k;
    REAL error;
    REAL ax;
    REAL ay;
    REAL b;
    REAL resid;
    REAL *tmp = (REAL *) malloc(sizeof(REAL) * n * m);
    if (tmp == NULL) {
        fprintf(stderr, "jacobi_omp: workspace allocation failed\n");
        return;
    }
    REAL (*uold)[m] = (REAL (*)[m]) tmp;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    /*
     * Initialize coefficients */
    ax = (1.0 / (dx * dx));   /* X-direction coef */
    ay = (1.0 / (dy * dy));   /* Y-direction coef */
    b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha); /* central */
    /* Loop-invariant broadcasts, hoisted out of the iteration loop. */
    const __m512 __ax = _mm512_set1_ps(ax);
    const __m512 __ay = _mm512_set1_ps(ay);
    const __m512 __b = _mm512_set1_ps(b);
    const __m512 __omega = _mm512_set1_ps(omega);
    error = (10.0 * tol);
    k = 1;
    while ((k <= mits) && (error > tol)) {
        error = 0.0;
        /* Copy new solution into old. */
        memcpy(uold, u, sizeof(REAL) * n * m);
        for (i = 1; i < (n - 1); i++) {
            __m512 __error = _mm512_setzero_ps(); /* per-row running sum */
            for (j = 1; j + 16 <= (m - 1); j += 16) {
                __m512 __up = _mm512_loadu_ps(&uold[i - 1][j]);
                __m512 __dn = _mm512_loadu_ps(&uold[i + 1][j]);
                __m512 __lf = _mm512_loadu_ps(&uold[i][j - 1]);
                __m512 __rt = _mm512_loadu_ps(&uold[i][j + 1]);
                __m512 __ct = _mm512_loadu_ps(&uold[i][j]);
                __m512 __fv = _mm512_loadu_ps(&f[i][j]);
                /* numerator = ax*(up+dn) + ay*(lf+rt) + b*c - f */
                __m512 __num = _mm512_mul_ps(__ax, _mm512_add_ps(__up, __dn));
                __num = _mm512_add_ps(__num,
                    _mm512_mul_ps(__ay, _mm512_add_ps(__lf, __rt)));
                __num = _mm512_add_ps(__num, _mm512_mul_ps(__b, __ct));
                __num = _mm512_sub_ps(__num, __fv);
                __m512 __resid = _mm512_div_ps(__num, __b);
                /* u[i][j..j+15] = uold[i][j..j+15] - omega * resid */
                _mm512_storeu_ps(&u[i][j],
                    _mm512_sub_ps(__ct, _mm512_mul_ps(__omega, __resid)));
                /* error += resid * resid (per lane) */
                __error = _mm512_add_ps(__error,
                    _mm512_mul_ps(__resid, __resid));
            }
            error += _mm512_reduce_add_ps(__error);
            /* Scalar tail for the interior columns not covered above. */
            for (; j < (m - 1); j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) +
                         ay * (uold[i][j - 1] + uold[i][j + 1]) +
                         b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        }
        error = sqrt(error) / (n * m);
        k = k + 1;
    } /* End iteration loop */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}
|
taskloop-reduction-1.c | int
/* GCC testsuite compile-only case: the dg-error annotation below asserts
   that the compiler rejects 'nogroup' combined with 'reduction' on an
   OpenMP taskloop.  The invalid pragma is intentional — do not "fix" it. */
foo (int *a)
{
  int x = 0;
#pragma omp taskloop reduction (+:x) nogroup /* { dg-error "'nogroup' clause must not be used together with 'reduction' clause" } */
  for (int i = 0; i < 64; i++)
    x += a[i];
#pragma omp taskwait
  return x;
}
|
GB_binop__max_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint64)
// A*D function (colscale): GB (_AxD__max_uint64)
// D*A function (rowscale): GB (_DxB__max_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint64)
// C=scalar+B GB (_bind1st__max_uint64)
// C=scalar+B' GB (_bind1st_tran__max_uint64)
// C=A+scalar GB (_bind2nd__max_uint64)
// C=A'+scalar GB (_bind2nd_tran__max_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT64 || GxB_NO_MAX_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the loop body comes from the
// shared template.  Unlike the other kernels in this generated file there
// is no GB_DISABLE guard here — presumably the caller checks before
// dispatching (matches the generator's comment above).
void GB (_Cdense_ewise3_accum__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.  Returns GrB_NO_VALUE when
// this operator/type combination is compiled out (GB_DISABLE), otherwise
// runs the shared template and returns GrB_SUCCESS.
GrB_Info GB (_Cdense_ewise3_noaccum__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, parallelized
// over the precomputed B_ek_slicing task partition.  Body supplied by the
// subassign-23 template.
GrB_Info GB (_Cdense_accumB__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  Body supplied by the
// subassign-22 template.
// Fix: the generated code returned GrB_SUCCESS both inside the inner
// block and again immediately after it — the second return was
// unreachable.  The single return now sits outside the block, matching
// the sibling kernel (_Cdense_accumB) above; behavior is unchanged.
GrB_Info GB (_Cdense_accumb__max_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// result values into C->x via the colscale template.
GrB_Info GB (_AxD__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the
// result values into C->x via the rowscale template.
GrB_Info GB (_DxB__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked, mask possibly complemented), with
// cij = GB_IMAX (aij, bij) on entries present in both.  The three WERK
// declarations provide scratch slicing workspace for the template, freed
// by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__max_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse or
// hypersparse; the whole algorithm lives in the included meta-template.
GrB_Info GB (_AemultB_08__max_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for MAX (it is commutative — see the
// macro table above), so only the non-flipped branch below is compiled;
// the flipxy machinery exists for non-commutative ops in other
// instantiations of this generated file.
GrB_Info GB (_AemultB_02__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; algorithm supplied by the included template.
GrB_Info GB (_AemultB_04__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is a bitmap matrix (mask optional,
// possibly complemented); algorithm supplied by the included template.
GrB_Info GB (_AemultB_bitmap__max_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = max (x, Bx [p]) for every entry present in B (Bb is B's
// bitmap; a NULL Bb means all bnz entries are present, per GBB).  The
// scalar is bound as the first operand.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__max_uint64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    const uint64_t x = (*((uint64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            uint64_t bij = GBX (Bx, p, false) ;
            Cx [p] = GB_IMAX (x, bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = max (Ax [p], y) for all entries present in A (bind2nd: the scalar
// y is the second operand). Cx and Ax may alias; the update is element-wise
// in place, so aliasing is safe.
GrB_Info GB (_bind2nd__max_uint64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
int64_t anz,                // # of entries to scan
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // only operate on entries present in A (GBB is true for full matrices)
    if (GBB (Ab, k))
    {
        uint64_t aval = GBX (Ax, k, false) ;
        Cx [k] = GB_IMAX (aval, y) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below; it must be redefined
// here because the preceding bind2nd kernels used a different expansion.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = op (x, A'): transpose A and apply max(x, aij) to every entry.
GrB_Info GB (_bind1st_tran__max_uint64)
(
GrB_Matrix C,
const GB_void *x_input,             // scalar bound as the first operand
const GrB_Matrix A,
int64_t *restrict *Workspaces,      // per-task transpose workspaces
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent generated code (preprocessor directives
// take effect at compile time even though they appear after the returns)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below; it must be redefined
// here because the bind1st_tran kernel above used a different expansion.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = op (A', y): transpose A and apply max(aij, y) to every entry.
GrB_Info GB (_bind2nd_tran__max_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,             // scalar bound as the second operand
int64_t *restrict *Workspaces,      // per-task transpose workspaces
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
CutPursuit_L2.h | #pragma once
#include "CutPursuit.h"
#include "Common.h"
namespace CP {
template <typename T>
class CutPursuit_L2 : public CutPursuit<T>
{
public:
~CutPursuit_L2(){
};
//=============================================================================================
//============================= COMPUTE ENERGY ===========================================
//=============================================================================================
// Compute the two terms of the cut-pursuit objective:
//   first  = fidelity: 0.5 * sum_v weight(v) * ||observation(v) - value(v)||^2
//   second = penalty : 0.5 * reg_strenth * sum of weights of active real edges
// Returns the pair (fidelity, penalty).
virtual std::pair<T,T> compute_energy() override
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
// the first element of pair_energy is the fidelity, the second the penalty
std::pair<T,T> pair_energy;
T energy = 0;
//#pragma omp parallel for private(i_dim) if (this->parameter.parallel) schedule(static) reduction(+:energy,i)
for (uint32_t ind_ver = 0; ind_ver < this->nVertex; ind_ver++)
{
VertexDescriptor<T> i_ver = boost::vertex(ind_ver, this->main_graph);
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
// squared residual of the current piecewise-constant approximation,
// weighted per vertex
energy += .5*vertex_attribute_map(i_ver).weight
* pow(vertex_attribute_map(i_ver).observation[i_dim]
- vertex_attribute_map(i_ver).value[i_dim],2);
}
}
pair_energy.first = energy;
energy = 0;
// penalty term: count each active real edge once (isActive is 0/1);
// non-real edges (source/sink arcs) do not contribute
EdgeIterator<T> i_edg, i_edg_end = boost::edges(this->main_graph).second;
for (i_edg = boost::edges(this->main_graph).first; i_edg != i_edg_end; ++i_edg)
{
if (!edge_attribute_map(*i_edg).realEdge)
{
continue;
}
energy += .5 * edge_attribute_map(*i_edg).isActive * this->parameter.reg_strenth
* edge_attribute_map(*i_edg).weight;
}
pair_energy.second = energy;
return pair_energy;
}
//=============================================================================================
//============================= SPLIT ===========================================
//=============================================================================================
// Split the graph by searching the best binary partition of each component.
// Each component is split into B and notB; the per-side centroids (h_1, h_2)
// and the vertex assignment are optimized alternately: centroids from the
// current labels, then labels from a max-flow cut with capacities derived
// from the centroids. Returns the saturation count from activate_edges().
virtual uint32_t split() override
{ // split the graph by trying to find the best binary partition
// each components is split into B and notB
// for each components we associate the value h_1 and h_2 to vertices in B or notB
// the affectation as well as h_1 and h_2 are computed alternatively
//tic();
//--------loading structures---------------------------------------------------------------
uint32_t nb_comp = this->components.size();
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
uint32_t saturation;
// stores whether each vertex is in B or not (indexed by global vertex index)
std::vector<bool> binary_label(this->nVertex);
// initialize the binary partition with kmeans
this->init_labels(binary_label);
// centers is the value of each binary component in the optimal partition
VectorOfCentroids<T> centers(nb_comp, this->dim);
//-----main loop----------------------------------------------------------------
// the optimal flow is iteratively approximated
for (uint32_t i_step = 1; i_step <= this->parameter.flow_steps; i_step++)
{
//the regularization strength at this step
// compute h_1 and h_2 (fresh centroids each step, from the current labels)
centers = VectorOfCentroids<T>(nb_comp, this->dim);
this->compute_centers(centers, nb_comp,binary_label);
this->set_capacities(centers);
// update the capacities of the flow graph, then solve the min-cut;
// the cut colors each vertex as source-side or sink-side
boost::boykov_kolmogorov_max_flow(
this->main_graph,
get(&EdgeAttribute<T>::capacity , this->main_graph),
get(&EdgeAttribute<T>::residualCapacity, this->main_graph),
get(&EdgeAttribute<T>::edge_reverse , this->main_graph),
get(&VertexAttribute<T>::color , this->main_graph),
get(boost::vertex_index , this->main_graph),
this->source,
this->sink);
// read the cut back into binary_label (skip already-saturated components)
for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
{
if (this->saturated_components[ind_com])
{
continue;
}
for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
{
// a vertex belongs to B iff it ends up on the sink side of the cut
binary_label[vertex_index_map(this->components[ind_com][i_ver])]
= (vertex_attribute_map(this->components[ind_com][i_ver]).color
== vertex_attribute_map(this->sink).color);
}
}
}
saturation = this->activate_edges();
return saturation;
}
//=============================================================================================
//============================= INIT_L2 ====== ===========================================
//=============================================================================================
// Initialize the binary labelling of each component with 2-means clustering.
// For every non-saturated component of size > 1, run
// parameter.kmeans_resampling restarts of k-means (k = 2) on the vertex
// observations, seeded kmeans++-style, and write the best labelling found
// into binary_label (indexed by the global vertex index).
//
// FIX: the three accumulation loops below previously raced under
// "#pragma omp parallel for" (best_energy, total_weight/kernels, and
// current_energy were all summed without a reduction); they now use
// reduction clauses, or run serially where a reduction cannot be expressed.
inline void init_labels(std::vector<bool> & binary_label)
{
    VertexAttributeMap<T> vertex_attribute_map
            = boost::get(boost::vertex_bundle, this->main_graph);
    VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
    uint32_t nb_comp = this->components.size();
    // Parallelize over components when there are many; otherwise the inner
    // per-vertex loops are parallelized instead (the "if" clauses on the
    // inner pragmas are the complement of this one).
    #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic)
    for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
    {
        std::vector< std::vector<T> > kernels(2, std::vector<T>(this->dim));
        T total_weight[2];
        T best_energy;
        T current_energy;
        uint32_t comp_size = this->components[ind_com].size();
        std::vector<bool> potential_label(comp_size);
        std::vector<T> energy_array(comp_size);
        if (this->saturated_components[ind_com] || comp_size <= 1)
        {
            continue;
        }
        for (uint32_t init_kmeans = 0; init_kmeans < this->parameter.kmeans_resampling; init_kmeans++)
        {   // proceed to several initializations of kmeans and pick the best one
            //----- initialization with KM++ ------------------
            // NOTE(review): std::rand() is not guaranteed thread-safe and is
            // called from inside a parallel region here (as in the original);
            // confirm whether reproducible runs are required.
            uint32_t first_kernel = std::rand() % comp_size, second_kernel = 0; // first kernel attributed
            for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
            {
                kernels[0][i_dim] = vertex_attribute_map(this->components[ind_com][first_kernel ]).observation[i_dim];
            }
            best_energy = 0; // total squared distance of every point to this kernel
            // FIX: best_energy is summed across iterations, so it needs a
            // reduction (it was previously shared(), which is a data race)
            #pragma omp parallel for if (nb_comp < omp_get_num_threads()) reduction(+:best_energy) schedule(static)
            for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
            {
                energy_array[i_ver] = 0;
                for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                {
                    energy_array[i_ver] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                 - kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                }
                best_energy += energy_array[i_ver];
            }
            // kmeans++ seeding: draw the second kernel with probability
            // proportional to the squared distance to the first kernel
            T random_sample = ((T)(rand())) / ((T)(RAND_MAX));
            current_energy = best_energy * random_sample;
            for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
            {
                current_energy -= energy_array[i_ver];
                if (current_energy < 0)
                {   // we have selected the second kernel
                    second_kernel = i_ver;
                    break;
                }
            }
            for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
            {   // now fill the second kernel
                kernels[1][i_dim] = vertex_attribute_map(this->components[ind_com][second_kernel]).observation[i_dim];
            }
            //----main kmeans loop-----
            for (uint32_t ite_kmeans = 0; ite_kmeans < this->parameter.kmeans_ite; ite_kmeans++)
            {
                //--affectation step: associate each node with its closest kernel
                // (each iteration writes a distinct potential_label entry: no race)
                #pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static)
                for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
                {
                    std::vector<T> distance_kernels(2);
                    for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                    {
                        distance_kernels[0] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                    - kernels[0][i_dim],2);
                        distance_kernels[1] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                    - kernels[1][i_dim],2);
                    }
                    potential_label[i_ver] = distance_kernels[0] > distance_kernels[1];
                }
                //-----computation of the new kernels----------------------------
                total_weight[0] = 0.;
                total_weight[1] = 0.;
                for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                {
                    kernels[0][i_dim] = 0;
                    kernels[1][i_dim] = 0;
                }
                // FIX: this loop used to run under "#pragma omp parallel for"
                // while summing into total_weight[] and kernels[][] with no
                // reduction — a data race. The std::vector accumulators cannot
                // use a plain reduction clause, so run the loop serially.
                for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
                {
                    if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0)
                    {
                        continue;
                    }
                    if (potential_label[i_ver])
                    {
                        total_weight[0] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                        for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                        {
                            kernels[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                    * vertex_attribute_map(this->components[ind_com][i_ver]).weight ;
                        }
                    }
                    else
                    {
                        total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                        for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                        {
                            kernels[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                    * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                        }
                    }
                }
                if ((total_weight[0] == 0)||(total_weight[1] == 0))
                {
                    // one cluster became empty: abandon this kmeans restart
                    //std::cout << "kmeans error : " << comp_size << std::endl;
                    break;
                }
                for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                {
                    kernels[0][i_dim] = kernels[0][i_dim] / total_weight[0];
                    kernels[1][i_dim] = kernels[1][i_dim] / total_weight[1];
                }
            }
            //----compute the associated energy ------
            current_energy = 0;
            // FIX: reduction needed here as well (was a race on current_energy)
            #pragma omp parallel for if (nb_comp < omp_get_num_threads()) reduction(+:current_energy) schedule(static)
            for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
            {
                for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                {
                    if (potential_label[i_ver])
                    {
                        current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                - kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                    }
                    else
                    {
                        current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                - kernels[1][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                    }
                }
            }
            // keep the labelling of the best restart seen so far
            if (current_energy < best_energy)
            {
                best_energy = current_energy;
                for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
                {
                    binary_label[vertex_index_map(this->components[ind_com][i_ver])] = potential_label[i_ver];
                }
            }
        }
    }
}
//=============================================================================================
//============================= COMPUTE_CENTERS_L2 ==========================================
//=============================================================================================
// Compute, for every non-saturated component, the pair of centroids (h_1, h_2)
// induced by the current binary labelling; saturated components are skipped.
inline void compute_centers(VectorOfCentroids<T> & centers, const uint32_t & nb_comp
         , const std::vector<bool> & binary_label)
{
    // one independent centroid computation per component
    #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic)
    for (uint32_t comp_idx = 0; comp_idx < nb_comp; comp_idx++)
    {
        if (!this->saturated_components[comp_idx])
        {
            compute_center(centers.centroids[comp_idx], comp_idx, binary_label);
        }
    }
    return;
}
//=============================================================================================
//============================= COMPUTE_CENTERS_L2 ==========================================
//=============================================================================================
// Compute the two centroids (weighted means of the observations) for the
// binary partition of component ind_com: center[0] for the vertices labelled
// true (in B), center[1] for the rest. If either side carries zero total
// weight, the component is declared saturated and both centroids are set to
// the component's current value.
inline void compute_center( std::vector< std::vector<T> > & center, const uint32_t & ind_com
, const std::vector<bool> & binary_label)
{
//compute for each component the values of the centroids corresponding to the optimal binary partition
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
T total_weight[2];
total_weight[0] = 0.;
total_weight[1] = 0.;
//#pragma omp parallel for if (this->parameter.parallel)
// accumulate weighted observations per side; zero-weight vertices are
// ignored (they carry no observation information)
for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
{
if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0)
{
continue;
}
if (binary_label[vertex_index_map(this->components[ind_com][i_ver])])
{
total_weight[0] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
// NOTE: center is assumed to start zeroed by the caller
center[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
* vertex_attribute_map(this->components[ind_com][i_ver]).weight ;
}
}
else
{
total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
center[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
* vertex_attribute_map(this->components[ind_com][i_ver]).weight;
}
}
}
if ((total_weight[0] == 0)||(total_weight[1] == 0))
{
// one side is empty: the component cannot be split further (saturated);
// pin both centroids to the current component value
this->saturateComponent(ind_com);
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
center[0][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim];
center[1][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim];
}
}
else
{
// normalize the accumulated sums into weighted means
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
center[0][i_dim] = center[0][i_dim] / total_weight[0];
center[1][i_dim] = center[1][i_dim] / total_weight[1];
}
}
return;
}
//=============================================================================================
//============================= SET_CAPACITIES ==========================================
//=============================================================================================
// Set the capacities of the flow graph before the max-flow cut:
//  - source->v and v->sink arcs encode, per vertex, the fidelity cost
//    difference between assigning the vertex to B (centroid 0) or notB
//    (centroid 1); only the positive difference is placed on one arc.
//  - vertex-to-vertex (real) edges get weight * reg_strenth if inactive,
//    0 if already active (an active edge is already cut).
inline void set_capacities(const VectorOfCentroids<T> & centers)
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
//----first compute the capacity in sink/node edges------------------------------------
//#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
uint32_t nb_comp = this->components.size();
#pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic)
for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
{
VertexDescriptor<T> desc_v;
EdgeDescriptor desc_source2v, desc_v2sink, desc_v2source;
T cost_B, cost_notB; //the cost of being in B or not B, local for each component
if (this->saturated_components[ind_com])
{
continue;
}
for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
{
desc_v = this->components[ind_com][i_ver];
// because of the adjacency structure NEVER access edge (source,v) directly!
desc_v2source = boost::edge(desc_v, this->source,this->main_graph).first;
desc_source2v = edge_attribute_map(desc_v2source).edge_reverse; //use edge_reverse instead
desc_v2sink = boost::edge(desc_v, this->sink,this->main_graph).first;
cost_B = 0;
cost_notB = 0;
if (vertex_attribute_map(desc_v).weight==0)
{ //no observation - no cut
edge_attribute_map(desc_source2v).capacity = 0;
edge_attribute_map(desc_v2sink).capacity = 0;
continue;
}
// expand 0.5*w*||obs - c||^2 and drop the ||obs||^2 term, which is
// identical for both sides and cancels in the difference
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
cost_B += 0.5*vertex_attribute_map(desc_v).weight
* (pow(centers.centroids[ind_com][0][i_dim],2) - 2 * (centers.centroids[ind_com][0][i_dim]
* vertex_attribute_map(desc_v).observation[i_dim]));
cost_notB += 0.5*vertex_attribute_map(desc_v).weight
* (pow(centers.centroids[ind_com][1][i_dim],2) - 2 * (centers.centroids[ind_com][1][i_dim]
* vertex_attribute_map(desc_v).observation[i_dim]));
}
// only the positive part of the cost difference is needed: put it on
// the arc whose side is more expensive, zero on the other
if (cost_B>cost_notB)
{
edge_attribute_map(desc_source2v).capacity = cost_B - cost_notB;
edge_attribute_map(desc_v2sink).capacity = 0.;
}
else
{
edge_attribute_map(desc_source2v).capacity = 0.;
edge_attribute_map(desc_v2sink).capacity = cost_notB - cost_B;
}
}
}
//----then set the vertex to vertex edges ---------------------------------------------
EdgeIterator<T> i_edg, i_edg_end;
for (boost::tie(i_edg, i_edg_end) = boost::edges(this->main_graph);
i_edg != i_edg_end; ++i_edg)
{
if (!edge_attribute_map(*i_edg).realEdge)
{
continue;
}
if (!edge_attribute_map(*i_edg).isActive)
{
edge_attribute_map(*i_edg).capacity
= edge_attribute_map(*i_edg).weight * this->parameter.reg_strenth;
}
else
{
edge_attribute_map(*i_edg).capacity = 0;
}
}
}
//=============================================================================================
//================================= COMPUTE_VALUE =========================================
//=============================================================================================
// Compute the weighted mean observation of component ind_com, write it into
// every vertex of the component (value[] and in_component), and return the
// pair (mean vector, total weight).
//
// FIX: the accumulation loop was previously run under
// "#pragma omp parallel for if (this->parameter.parallel)", but both
// total_weight and compValue[] were summed without any reduction, which is a
// data race. The vector accumulator cannot use a plain reduction clause, so
// the loop now runs serially (components are bounded in size, and the caller
// can still parallelize across components).
virtual std::pair<std::vector<T>, T> compute_value(const uint32_t & ind_com) override
{
    VertexAttributeMap<T> vertex_attribute_map
            = boost::get(boost::vertex_bundle, this->main_graph);
    T total_weight = 0;
    std::vector<T> compValue(this->dim);
    std::fill((compValue.begin()),(compValue.end()),0);
    for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
    {
        total_weight += vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
        for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
        {
            compValue[i_dim] += vertex_attribute_map(this->components[ind_com][ind_ver]).observation[i_dim]
                    * vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
        }
        vertex_attribute_map(this->components[ind_com][ind_ver]).in_component = ind_com;
    }
    // NOTE(review): if every vertex has zero weight this divides by zero,
    // as in the original — confirm upstream guarantees total_weight > 0.
    for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
    {
        compValue[i_dim] = compValue[i_dim] / total_weight;
    }
    // broadcast the component mean back onto every vertex
    for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
    {
        for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
        {
            vertex_attribute_map(this->components[ind_com][ind_ver]).value[i_dim] = compValue[i_dim];
        }
    }
    return std::pair<std::vector<T>, T>(compValue, total_weight);
}
//=============================================================================================
//================================= COMPUTE_MERGE_GAIN =========================================
//=============================================================================================
// Compute the value that merging the two reduced-graph components would take
// (their weighted mean) and the resulting change in fidelity energy (gain).
// Returns the pair (merged value vector, gain).
virtual std::pair<std::vector<T>, T> compute_merge_gain(const VertexDescriptor<T> & comp1
         , const VertexDescriptor<T> & comp2) override
{
    VertexAttributeMap<T> reduced_vertex_attribute_map
            = boost::get(boost::vertex_bundle, this->reduced_graph);
    std::vector<T> merge_value(this->dim);
    T gain = 0;
    // weights are the same for every dimension: hoist them out of the loop
    const T w1 = reduced_vertex_attribute_map(comp1).weight;
    const T w2 = reduced_vertex_attribute_map(comp2).weight;
    const T w_sum = w1 + w2;
    for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
    {
        const T v1 = reduced_vertex_attribute_map(comp1).value[i_dim];
        const T v2 = reduced_vertex_attribute_map(comp2).value[i_dim];
        // merged value = weighted mean of the two component values
        merge_value[i_dim] = (w1 * v1 + w2 * v2) / w_sum;
        // energy difference between one merged constant and two constants
        gain += 0.5 * (pow(merge_value[i_dim], 2) * w_sum
                       - pow(v1, 2) * w1
                       - pow(v2, 2) * w2);
    }
    return std::pair<std::vector<T>, T>(merge_value, gain);
}
};
}
|
zgelqf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gelqf
*
* Computes tile LQ factorization of a complex m-by-n matrix A.
* The factorization has the form
* \f[ A = L \times Q \f],
* where L is a lower trapezoidal with positive diagonal and Q is a matrix with
* orthonormal rows.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, pointer to the m-by-n matrix A.
* On exit, the elements on and below the diagonal of the array
* contain the m-by-min(m,n) lower trapezoidal matrix L (L is lower
* triangular if M <= N); the elements above the diagonal represent
* the unitary matrix Q as a product of elementary reflectors, stored
* by tiles.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* On exit, auxiliary factorization data, required by plasma_zgelqs
* to solve the system of equations.
* Matrix of T is allocated inside this function and needs to be
* destroyed by plasma_desc_destroy.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zgelqf
* @sa plasma_cgelqf
* @sa plasma_dgelqf
* @sa plasma_sgelqf
* @sa plasma_zgelqs
*
******************************************************************************/
// Compute the tile LQ factorization A = L*Q of an m-by-n matrix stored in
// LAPACK layout. On success, pA holds L (below the diagonal) and the
// Householder reflectors of Q; *T receives the auxiliary factorization data
// and must be freed by the caller with plasma_desc_destroy.
// Returns PlasmaSuccess, a negative value for an illegal argument, or the
// error code of a failed internal step.
int plasma_zgelqf(int m, int n,
                  plasma_complex64_t *pA, int lda,
                  plasma_desc_t *T)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaComplexDouble, m, n);
    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    plasma_enum_t householder_mode = plasma->householder_mode;
    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // FIX: release the tile matrix A; it leaked on this error path
        plasma_desc_destroy(&A);
        return retval;
    }
    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // gelqt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // FIX: release T and A; both leaked on this error path
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize sequence.
    // NOTE(review): the return values of plasma_sequence_init and
    // plasma_request_init are assigned but not checked, as in the original —
    // confirm they cannot fail in practice.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // asynchronous block: translate to tiles, factorize, translate back
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        // Call the tile async function.
        plasma_omp_zgelqf(A, *T, work, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization at the end of the parallel region
    plasma_workspace_destroy(&work);
    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gelqf
*
* Computes the tile LQ factorization of a matrix.
* Non-blocking tile version of plasma_zgelqf().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] A
* Descriptor of matrix A.
* A is stored in the tile layout.
*
* @param[out] T
* Descriptor of matrix T.
* On exit, auxiliary factorization data, required by plasma_zgelqs to
* solve the system of equations.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For LQ factorization, contains preallocated space for tau and work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zgelqf
* @sa plasma_omp_cgelqf
* @sa plasma_omp_dgelqf
* @sa plasma_omp_sgelqf
* @sa plasma_omp_zgelqs
*
******************************************************************************/
// Non-blocking tile version of plasma_zgelqf(); may return before the
// computation finishes. Errors are reported through sequence->status and
// request->status rather than a return value.
void plasma_omp_zgelqf(plasma_desc_t A, plasma_desc_t T,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        // NOTE(review): plasma_request_fail is invoked with a NULL sequence
        // here — presumably it tolerates NULL; verify against its definition.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (imin(A.m, A.n) == 0)
        return;
    // Call the parallel function: tree-based or flat Householder reduction,
    // depending on the context's householder_mode.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pzgelqf_tree(A, T, work, sequence, request);
    }
    else {
        plasma_pzgelqf(A, T, work, sequence, request);
    }
}
|
omp_smithW.c | /*********************************************************************************
* Smith–Waterman algorithm
* Purpose: Local alignment of nucleotide or protein sequences
* Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro
*
* Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG // debugging mode
* gcc omp_smithW.c -O3 -o omp_smithW -fopenmp // production run
* Execution: ./omp_smithW <number_of_col> <number_of_rows>
*
* Updated by C. Liao, Jan 2nd, 2019
*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <assert.h>
#include "parameters.h"
/*--------------------------------------------------------------------
* Text Tweaks
*/
#define RESET "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */
/*--------------------------------------------------------------------
* Constants
*/
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
/* End of constants */
/*--------------------------------------------------------------------
* Helpers
*/
#define min(x, y) (((x) < (y)) ? (x) : (y))
#define max(a,b) ((a) > (b) ? a : b)
// #define DEBUG
/* End of Helpers */
/*--------------------------------------------------------------------
* Functions Prototypes
*/
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos);
int matchMissmatchScore(long long int i, long long int j);
void backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int nElement(long long int i);
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj);
/* End of prototypes */
/*--------------------------------------------------------------------
* Global Variables
*/
bool useBuiltInData=true;
//Defines size of strings to be compared
long long int m = 8 ; //Columns - Size of string a
long long int n = 9; //Lines - Size of string b
// the generated scoring matrix's size is m++ and n++ later to have the first row/column as 0s.
//Defines scores
int matchScore = 3;
int missmatchScore = -3;
int gapScore = -2;
//Strings over the Alphabet Sigma
char *a, *b;
/* End of global variables */
/*--------------------------------------------------------------------
* Function: main
*/
int main(int argc, char* argv[]) {
    /*
     * Smith-Waterman driver: fills the scoring (H) and predecessor (P)
     * matrices with an anti-diagonal wavefront parallelized via OpenMP,
     * then backtracks the best local alignment.
     * Usage: prog [m n]; with no arguments a built-in test pair is used
     * and the known best score (7) is asserted.
     */
    int thread_count; /* actual OpenMP team size, reported below */
    if (argc == 3) {
        m = strtoll(argv[1], NULL, 10);
        n = strtoll(argv[2], NULL, 10);
        useBuiltInData = false;
    }
    if (useBuiltInData)
        printf ("Using built-in data for testing ..\n");
    printf("Problem size: Matrix[%lld][%lld], FACTOR=%d CUTOFF=%d\n", n, m, FACTOR, CUTOFF);
    /* Allocate the two sequences (BUG FIX: results were unchecked). */
    a = (char*) malloc(m * sizeof(char));
    b = (char*) malloc(n * sizeof(char));
    if (a == NULL || b == NULL) {
        fprintf(stderr, "Failed to allocate input sequences\n");
        return EXIT_FAILURE;
    }
    /* Grow by one: row/column 0 of H and P are zero padding. */
    m++;
    n++;
    /* Similarity matrix H and predecessor matrix P, zero-initialized. */
    int *H = (int *) calloc(m * n, sizeof(int));
    int *P = (int *) calloc(m * n, sizeof(int));
    if (H == NULL || P == NULL) {
        fprintf(stderr, "Failed to allocate scoring matrices\n");
        return EXIT_FAILURE;
    }
    if (useBuiltInData)
    {
        /* https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example
         * Using the wiki example to verify the results. */
        b[0] = 'G';
        b[1] = 'G';
        b[2] = 'T';
        b[3] = 'T';
        b[4] = 'G';
        b[5] = 'A';
        b[6] = 'C';
        b[7] = 'T';
        b[8] = 'A';
        a[0] = 'T';
        a[1] = 'G';
        a[2] = 'T';
        a[3] = 'T';
        a[4] = 'A';
        a[5] = 'C';
        a[6] = 'G';
        a[7] = 'G';
    }
    else
    {
        /* Random sequences over {A, C, G, T}. */
        generate();
    }
    long long int maxPos = 0; /* index of the best score: backtrack seed */
    long long int i, j;
    /* One wavefront line per anti-diagonal of the (m-1) x (n-1) submatrix:
     * total = (m-1) + (n-1) - 1. */
    long long int nDiag = m + n - 3;
#ifdef DEBUG
    printf("nDiag=%lld\n", nDiag); /* BUG FIX: %lld for a long long argument */
    printf("Number of wavefront lines and their first element positions:\n");
#endif
    #pragma omp parallel
    {
        #pragma omp master
        {
            thread_count = omp_get_num_threads();
            printf ("Using %d out of max %d threads...", thread_count, omp_get_max_threads());
        }
    }
    double initialTime = omp_get_wtime();
    for (i = 1; i <= nDiag; ++i) /* start from 1: diagonal 0 is the padding */
    {
        long long int nEle, si, sj;
        nEle = nElement(i);
        calcFirstDiagElement(i, &si, &sj);
        /* Cells of one anti-diagonal are independent: parallelize across them. */
        #pragma omp parallel for private(j) shared (nEle, si, sj, H, P, maxPos) if (nEle>=CUTOFF)
        for (j = 0; j < nEle; ++j)
        {
            long long int ai = si - j; /* going up vertically */
            long long int aj = sj + j; /* going right horizontally */
            similarityScore(ai, aj, H, P, &maxPos); /* serializes maxPos internally */
        }
    }
    double finalTime = omp_get_wtime();
    printf("\nElapsed time for scoring matrix computation: %f\n", finalTime - initialTime);
    initialTime = omp_get_wtime();
    backtrack(P, maxPos);
    finalTime = omp_get_wtime(); /* BUG FIX: timestamp was taken twice */
    printf("Elapsed time for backtracking: %f\n", finalTime - initialTime);
    if (useBuiltInData)
    {
        printf ("Verifying results using the builtinIn data: %s\n", (H[n*m-1]==7)?"true":"false");
        assert (H[n*m-1]==7);
    }
#ifdef DEBUG
    printf("\nSimilarity Matrix:\n");
    printMatrix(H);
    printf("\nPredecessor Matrix:\n");
    printPredecessorMatrix(P);
#endif
    /* Free the matrices, then the input sequences. */
    free(H);
    free(P);
    free(a);
    free(b);
    return 0;
} /* End of main */
/*--------------------------------------------------------------------
* Function: nElement
* Purpose: Calculate the number of i-diagonal's elements
* i value range 1 to nDiag. we inclulde the upper bound value. 0 is for the padded wavefront, which is ignored.
*/
long long int nElement(long long int i) {
    /*
     * Number of elements on anti-diagonal i of the (m-1) x (n-1) scoring
     * submatrix. Valid i is 1..nDiag inclusive (0 is the padded wavefront).
     */
    if (i < m && i < n) {
        /* Growing phase: diagonals lengthen one element at a time. */
        return i;
    }
    else if (i < max(m, n)) {
        /* Steady phase: length pinned by the smaller dimension.
         * BUG FIX: no longer narrows the result through a `long int` local. */
        return min(m, n) - 1;
    }
    else {
        /* Shrinking phase near the bottom-right corner.
         * BUG FIX: llabs instead of abs — m and n are long long, and abs()
         * would silently truncate the difference to int. */
        return 2 * min(m, n) - i + llabs(m - n) - 2;
    }
}
/*--------------------------------------------------------------------
* Function: calcElement: expect valid i value is from 1 to nDiag. since the first one is 0 padding
* Purpose: Calculate the position of (si, sj)-element
* n rows, m columns: we sweep the matrix on the left edge then bottom edge to get the wavefront
*/
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) {
    /*
     * Position (si, sj) of the first (left-most) element of anti-diagonal i.
     * The wavefront origins sweep down the left edge of the padded matrix,
     * then along its bottom row; row/column 0 are padding, so sj starts at 1.
     */
    if (i >= n) {
        /* Bottom-row sweep: row is pinned, column advances. */
        *si = n - 1;
        *sj = i - n + 2;
    } else {
        /* Left-edge sweep. */
        *si = i;
        *sj = 1;
    }
}
/*
// understanding the calculation by an example
n =6 // row
m =2 // col
padded scoring matrix
n=7
m=3
0 1 2
-------
0 x x x
1 x x x
2 x x x
3 x x x
4 x x x
5 x x x
6 x x x
We should peel off top row and left column since they are the padding
the remaining 6x2 sub matrix is what is interesting for us
Now find the number of wavefront lines and their first element's position in the scoring matrix
total diagnol frontwave = (n-1) + (m-1) -1 // submatrix row+column -1
We use the left most element in each wavefront line as its first element.
Then we have the first elements like
(1,1),
(2,1)
(3,1)
..
(6,1) (6,2)
*/
/*--------------------------------------------------------------------
* Function: SimilarityScore
* Purpose: Calculate value of scoring matrix element H(i,j) : the maximum Similarity-Score H(i,j)
* int *P; the predecessor array,storing which of the three elements is picked with max value
*/
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
    /*
     * Compute scoring-matrix cell H(i,j) and its predecessor P(i,j), and
     * keep *maxPos pointing at the best cell seen so far (backtrack seed).
     * i, j are 1-based: row/column 0 is the zero padding.
     * Called concurrently from the wavefront loop; the maxPos update is the
     * only cross-cell shared write and is fully serialized below.
     */
    int up, left, diag;
    /* Flattened index of element (i,j); the row stride is m. */
    long long int index = m * i + j;
    /* Score arriving from above (gap / deletion). */
    up = H[index - m] + gapScore;
    /* Score arriving from the left (gap / insertion). */
    left = H[index - 1] + gapScore;
    /* Score arriving on the diagonal (match or mismatch). */
    diag = H[index - m - 1] + matchMissmatchScore(i, j);
    /* Local alignment: scores never drop below NONE. */
    int max = NONE;
    int pred = NONE;
    if (diag > max) { /* same letter ↖ */
        max = diag;
        pred = DIAGONAL;
    }
    if (up > max) { /* remove letter ↑ */
        max = up;
        pred = UP;
    }
    if (left > max) { /* insert letter ← */
        max = left;
        pred = LEFT;
    }
    /* Record the cell. */
    H[index] = max;
    P[index] = pred;
    /*
     * BUG FIX: the compare and the update must be atomic as a pair.
     * Previously only the store sat inside the critical section, so two
     * threads could both pass the comparison against a stale H[*maxPos]
     * and the smaller score could win the race.
     */
    #pragma omp critical
    {
        if (max > H[*maxPos])
            *maxPos = index;
    }
} /* End of similarityScore */
/*--------------------------------------------------------------------
* Function: matchMissmatchScore
* Purpose: Similarity function on the alphabet for match/missmatch
*/
int matchMissmatchScore(long long int i, long long int j) {
    /*
     * Alphabet similarity: matchScore when a[j-1] equals b[i-1], otherwise
     * missmatchScore. Indices are shifted by one for the padding row/column.
     */
    return (a[j - 1] == b[i - 1]) ? matchScore : missmatchScore;
} /* End of matchMissmatchScore */
/*--------------------------------------------------------------------
* Function: backtrack
* Purpose: Modify matrix to print, path change from value to PATH
*/
void backtrack(int* P, long long int maxPos) {
    /*
     * Follow predecessor links from maxPos toward the alignment start,
     * marking each visited cell by multiplying it by PATH (so the printers
     * can highlight the path). Stops at the first NONE cell.
     */
    long long int predPos;
    do {
        if (P[maxPos] == DIAGONAL)
            predPos = maxPos - m - 1;
        else if (P[maxPos] == UP)
            predPos = maxPos - m;
        else if (P[maxPos] == LEFT)
            predPos = maxPos - 1;
        else
            break; /* BUG FIX: predPos was read uninitialized when the
                    * starting cell was already NONE */
        P[maxPos] *= PATH;
        maxPos = predPos;
    } while (P[maxPos] != NONE);
} /* End of backtrack */
/*--------------------------------------------------------------------
* Function: printMatrix
* Purpose: Print Matrix
*/
void printMatrix(int* matrix) {
    /*
     * Print the scoring matrix, tab-separated, with sequence a labelling
     * the columns and sequence b labelling the rows ('-' for the padding).
     */
    long long int row, col;
    printf("-\t-\t");
    for (col = 0; col + 1 < m; col++)
        printf("%c\t", a[col]);
    printf("\n-\t");
    for (row = 0; row < n; row++) {
        for (col = 0; col < m; col++) {
            if (col == 0 && row > 0)
                printf("%c\t", b[row - 1]);
            printf("%d\t", matrix[m * row + col]);
        }
        printf("\n");
    }
} /* End of printMatrix */
/*--------------------------------------------------------------------
* Function: printPredecessorMatrix
* Purpose: Print predecessor matrix
*/
/*
 * Print the predecessor matrix as direction arrows; cells on the
 * backtracked path (negated by backtrack via PATH) are shown in bold red.
 * Sequence a labels the columns, b the rows.
 */
void printPredecessorMatrix(int* matrix) {
    long long int i, j, index;
    printf(" ");
    for (j = 0; j < m-1; j++) {
        printf("%c ", a[j]);
    }
    printf("\n ");
    for (i = 0; i < n; i++) { /* lines */
        for (j = 0; j < m; j++) {
            if (j==0 && i>0) printf("%c ", b[i-1]);
            index = m * i + j;
            if (matrix[index] < 0) {
                /* Negative value: this cell is on the alignment path. */
                printf(BOLDRED);
                if (matrix[index] == -UP)
                    printf("↑ ");
                else if (matrix[index] == -LEFT)
                    printf("← ");
                else if (matrix[index] == -DIAGONAL)
                    printf("↖ ");
                else
                    printf("- ");
                printf(RESET);
            } else {
                if (matrix[index] == UP)
                    printf("↑ ");
                else if (matrix[index] == LEFT)
                    printf("← ");
                else if (matrix[index] == DIAGONAL)
                    printf("↖ ");
                else
                    printf("- ");
            }
        }
        printf("\n");
    }
} /* End of printPredecessorMatrix */
/*--------------------------------------------------------------------
* Function: generate
* Purpose: Generate arrays a and b
*/
void generate() {
    /*
     * Fill the global sequences a (length m) and b (length n) with random
     * nucleotides. The table preserves the original draw mapping:
     * 0 -> 'A', 1 -> 'T', 2 -> 'C', 3 -> 'G', one rand() call per symbol.
     */
    static const char nucleotides[4] = { 'A', 'T', 'C', 'G' };
    long long int k;
    srand(time(NULL));
    for (k = 0; k < m; k++) {
        a[k] = nucleotides[rand() % 4];
    }
    for (k = 0; k < n; k++) {
        b[k] = nucleotides[rand() % 4];
    }
} /* End of generate */
/*--------------------------------------------------------------------
* External References:
* http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
* http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm
* http://baba.sourceforge.net/
*/
|
matrix.c |
#include "matrix.h"
/*
* matrix.c
*
* Copyright (c) 2014, Rafat Hussain
* License : BSD 3-Clause
* See COPYRIGHT for more details
*/
/*
 * (value pointer, original index) pair used by sort1d/compare to sort
 * values while remembering where each one came from.
 */
typedef struct {
    double* a; /* points into the caller's array */
    int b;     /* original position of *a */
} vipair;
double macheps() {
    /*
     * Machine epsilon for double: the smallest power of two eps such that
     * 1.0 + eps/2 rounds back to 1.0 (equals DBL_EPSILON = 2^-52).
     */
    double eps = 2.0;
    do {
        eps *= 0.5;
    } while (1.0 + eps * 0.5 > 1.0);
    return eps;
}
double pmax(double a, double b) {
    /* Larger of two doubles. */
    return (a > b) ? a : b;
}
double pmin(double a, double b) {
    /* Smaller of two doubles. */
    return (a < b) ? a : b;
}
int imax(int a, int b) {
    /* Larger of two ints. */
    return (a > b) ? a : b;
}
int imin(int a, int b) {
    /* Smaller of two ints. */
    return (a < b) ? a : b;
}
double signx(double x) {
    /* Sign of x as +/-1.0; zero maps to +1.0. */
    return (x >= 0.) ? 1.0 : -1.0;
}
double l2norm(double *vec, int N) {
double l2, sum;
int i;
sum = 0.;
for (i = 0; i < N; ++i) {
sum += vec[i] * vec[i];
}
l2 = sqrt(sum);
return l2;
}
int compare (const void* ind1, const void* ind2)
{
if (*((vipair *)ind1)->a > *((vipair *)ind2)->a)
return -1;
else if (*((vipair *)ind1)->a < *((vipair *)ind2)->a)
return 1;
else
return 0;
}
void sort1d(double* v, int N, int* pos)
{
    /*
     * Fill pos[0..N-1] with the indices of v sorted by descending value
     * (see compare). v itself is not modified.
     * No-op for N <= 0 or if the scratch buffer cannot be allocated.
     */
    vipair* val = NULL;
    int i;
    if (N <= 0)
        return;
    val = malloc(sizeof(vipair) * N);
    if (val == NULL)
        return; /* BUG FIX: malloc result was previously used unchecked */
    for (i = 0; i < N; ++i) {
        val[i].a = &v[i];
        val[i].b = i;
    }
    qsort(val, N, sizeof(vipair), compare);
    for (i = 0; i < N; ++i)
        pos[i] = val[i].b;
    free(val);
}
double array_max_abs(double *array, int N) {
    /* Largest absolute value in the array; 0.0 for an empty array. */
    double best = 0.0;
    int k;
    for (k = 0; k < N; ++k) {
        double mag = fabs(array[k]);
        if (mag > best) {
            best = mag;
        }
    }
    return best;
}
double array_max(double *array, int N) {
    /* Largest element of the array; caller must ensure N >= 1. */
    double best = array[0];
    int k;
    for (k = 1; k < N; ++k) {
        if (array[k] > best) {
            best = array[k];
        }
    }
    return best;
}
double array_min(double *array, int N) {
    /* Smallest element of the array; caller must ensure N >= 1. */
    double best = array[0];
    int k;
    for (k = 1; k < N; ++k) {
        if (array[k] < best) {
            best = array[k];
        }
    }
    return best;
}
void dtranspose(double *sig, int rows, int cols,double *col) {
int max,ud,i,k;
if (rows >= cols) {
max = cols;
} else {
max = rows;
}
ud = 0;
for (i= -rows + 1; i < cols; i++) {
if (i <= 0) {
ud++;
if (ud >= max)
ud = max;
for (k = 0; k < ud; k++) {
col[k*rows+k-i] = sig[(k-i)*cols+k];
}
} else {
if (i - cols + rows > 0) {
ud--;
if (ud >= max)
ud = max;
}
for (k = 0; k < ud; k++) {
col[(k+i)*rows+k] = sig[k*cols+k+i];
}
}
}
}
void stranspose(double *sig, int rows, int cols, double *col) {
    /*
     * Scalar transpose: sig is rows x cols row-major; col receives the
     * cols x rows transpose. Rows are handled independently, so the outer
     * loop is parallelized.
     */
    int i, j;
    #pragma omp parallel for private(i, j)
    for (i = 0; i < rows; i++) {
        for (j = 0; j < cols; j++) {
            col[j * rows + i] = sig[i * cols + j];
        }
    }
}
/*
 * Cache-oblivious recursive transpose: the rows x cols window of m (full
 * source row stride c) is transposed into n (full destination row stride r).
 * The longer dimension is split in half until the tile fits in BLOCKSIZE,
 * then the tile is transposed element-wise.
 */
void rtranspose(double *m, int rows, int cols, double *n, int r, int c) {
    register int i, j;
    int rm, cm;
    int rm1, cm1, rm2, cm2;
    int block;
    block = (int) BLOCKSIZE;
    if (rows <= block && cols <= block) {
        /* Base case: direct transpose of the tile. */
        for (i = 0; i < rows; ++i) {
            for (j = 0; j < cols; ++j) {
                n[i + j * r] = m[j + i * c];
            }
        }
    } else if (cols >= rows) {
        /* Split the column range in half. */
        rm = rows;
        cm1 = (int) ceil((double) cols / 2.0);
        cm2 = cols - cm1;
        rtranspose(m, rm, cm1, n, r, c);
        rtranspose(m + cm1, rm, cm2, n + cm1 * r, r, c);
    } else if (rows > cols) {
        /* Split the row range in half. */
        rm1 = (int) ceil((double) rows / 2.0);
        rm2 = rows - rm1;
        cm = cols;
        rtranspose(m, rm1, cm, n, r, c);
        rtranspose(m + rm1 * c, rm2, cm, n + rm1, r, c);
    }
}
void ctranspose(double *sig, int rows, int cols, double *col) {
    /*
     * Dispatch: scalar transpose when both dimensions fit in one BLOCKSIZE
     * tile, otherwise the recursive cache-oblivious variant.
     */
    int block = (int) BLOCKSIZE;
    if (rows < block && cols < block) {
        stranspose(sig, rows, cols, col);
    } else {
        rtranspose(sig, rows, cols, col, rows, cols);
    }
}
void mtranspose(double *sig, int rows, int cols, double *col) {
    /*
     * Top-level transpose entry point: take the cache-oblivious path only
     * when both dimensions exceed 16 BLOCKSIZE tiles, else transpose directly.
     */
    int threshold = (int) BLOCKSIZE * 16;
    if (rows < threshold || cols < threshold) {
        stranspose(sig, rows, cols, col);
    } else {
        ctranspose(sig, rows, cols, col);
    }
}
void mdisplay(double *A, int row, int col) {
    /* Pretty-print a row x col row-major matrix, one "R<i>: ... :R<i>" line per row. */
    int i, j;
    printf("\n MATRIX Order : %d X %d \n \n", row, col);
    for (i = 0; i < row; i++) {
        printf("R%d: ", i);
        for (j = 0; j < col; j++) {
            printf("%g ", A[i * col + j]);
        }
        printf(":R%d \n", i);
    }
}
void madd(double* A, double* B, double* C, int rows, int cols) {
    /* Element-wise C = A + B; all three matrices are rows x cols. */
    int total = rows * cols;
    int k;
    #pragma omp parallel for
    for (k = 0; k < total; ++k) {
        C[k] = A[k] + B[k];
    }
}
void msub(double* A, double* B, double* C, int rows, int cols) {
    /* Element-wise C = A - B; all three matrices are rows x cols. */
    int total = rows * cols;
    int k;
    #pragma omp parallel for
    for (k = 0; k < total; ++k) {
        C[k] = A[k] - B[k];
    }
}
void scale(double *A, int rows, int cols, double alpha) {
    /* In-place A *= alpha over all rows*cols entries. */
    int total = rows * cols;
    int k;
    #pragma omp parallel for
    for (k = 0; k < total; ++k) {
        A[k] *= alpha;
    }
}
void nmult(double* A, double* B, double* C, int ra, int ca, int cb) {
    /*
     * Naive triple-loop product C = A * B.
     * A: ra x ca, B: ca x cb, C: ra x cb (all row-major, C overwritten).
     */
    register int i, j, k;
    #pragma omp parallel for private(i, j, k)
    for (i = 0; i < ra; ++i) {
        for (j = 0; j < cb; ++j) {
            double acc = 0.;
            for (k = 0; k < ca; ++k) {
                acc += A[i * ca + k] * B[k * cb + j];
            }
            C[i * cb + j] = acc;
        }
    }
}
void tmult(double* A, double* B, double* C, int ra, int ca, int cb) {
    /*
     * C = A * B computed against an explicit transpose of B so the inner
     * loop reads both operands contiguously.
     * A: ra x ca, B: ca x cb, C: ra x cb (row-major).
     * BUG FIX: the scratch malloc was unchecked; on failure we now fall
     * back to the naive kernel instead of dereferencing NULL.
     */
    register int i, j, k;
    int u, v, t, rb;
    double *BT;
    BT = (double*) malloc(sizeof(double) * ca * cb);
    if (BT == NULL) {
        nmult(A, B, C, ra, ca, cb);
        return;
    }
    mtranspose(B, ca, cb, BT);
    rb = ca;
    #pragma omp parallel for private(i, j, k, v, u, t)
    for (i = 0; i < ra; ++i) {
        for (j = 0; j < cb; ++j) {
            v = i * rb;
            u = i * cb;
            t = j + u;
            C[t] = 0.;
            for (k = 0; k < rb; ++k) {
                /* Row j of BT is column j of B, stride rb. */
                C[t] += A[k + v] * BT[k + j * rb];
            }
        }
    }
    free(BT);
}
/*
 * Cache-oblivious recursive matmul kernel: accumulates C += A * B where the
 * current blocks are m x n (A), n x p (B) and m x p (C). The caller must
 * zero C first (rmult does). Below CUTOFF it runs a naive triple loop;
 * otherwise the largest dimension is halved and both halves recurse.
 * NOTE(review): despite the parameter names, A is indexed with stride sB
 * and B/C with stride sC (rmult passes sA=m, sB=n, sC=p, i.e. the strides
 * are shifted one slot and sA is never used). The arithmetic is
 * self-consistent, but confirm the naming before refactoring.
 */
void recmult(double* A, double* B, double* C, int m, int n, int p, int sA, int sB, int sC) {
    int m2, n2, p2;
    register int i, j, k;
    int u, v, t;
    if (m + n + p <= CUTOFF) {
        /* Base case: naive accumulation. */
        for (i = 0; i < m; ++i) {
            for (j = 0; j < p; ++j) {
                v = i * sB;
                u = i * sC;
                t = j + u;
                for (k = 0; k < n; ++k) {
                    C[t] += A[k + v] * B[j + k * sC];
                }
            }
        }
    } else if (m >= n && m >= p) {
        /* m largest: split A and C horizontally. */
        m2 = (int) ceil((double) m / 2.0);
        recmult(A, B, C, m2, n, p, sA, sB, sC);
        recmult(A + m2 * sB, B, C + m2 * sC, m - m2, n, p, sA, sB, sC);
    } else if (n >= m && n >= p) {
        /* n largest: split the inner dimension; both halves add into C. */
        n2 = (int) ceil((double) n / 2.0);
        recmult(A, B, C, m, n2, p, sA, sB, sC);
        recmult(A + n2, B + n2 * sC, C, m, n - n2, p, sA, sB, sC);
    } else if (p >= m && p >= n) {
        /* p largest: split B and C vertically. */
        p2 = (int) ceil((double) p / 2.0);
        recmult(A, B, C, m, n, p2, sA, sB, sC);
        recmult(A, B + p2, C + p2, m, n, p - p2, sA, sB, sC);
    }
}
void rmult(double* A, double* B, double* C, int m, int n, int p) {
    /*
     * C = A * B (A: m x n, B: n x p) via the cache-oblivious recursion.
     * C is zeroed first because recmult accumulates with +=.
     */
    const int total = m * p;
    register int k;
    for (k = 0; k < total; ++k) {
        C[k] = 0.;
    }
    recmult(A, B, C, m, n, p, m, n, p);
}
int findrec(int *a, int *b, int *c) {
    /*
     * Determine how many halvings bring a+b+c under CUTOFF; returns that
     * recursion depth and rounds each dimension up to ceil(dim/2^depth) *
     * 2^depth, so the padded sizes halve cleanly all the way down.
     */
    double da = (double) *a;
    double db = (double) *b;
    double dc = (double) *c;
    double mul = 1.;
    int depth = 0;
    while (da + db + dc > (double) CUTOFF) {
        ++depth;
        mul *= 2;
        da = ceil(da / 2.);
        db = ceil(db / 2.);
        dc = ceil(dc / 2.);
    }
    *a = (int) da * mul;
    *b = (int) db * mul;
    *c = (int) dc * mul;
    return depth;
}
void add_zero_pad(double *X, int rows, int cols, int zrow, int zcol, double *Y) {
    /*
     * Copy X (rows x cols) into the top-left corner of Y
     * ((rows+zrow) x (cols+zcol)), zero-filling the added border.
     */
    const int r = rows + zrow;
    const int c = cols + zcol;
    int i, j;
    for (i = 0; i < r; ++i) {
        for (j = 0; j < c; ++j) {
            Y[i * c + j] = (i < rows && j < cols) ? X[i * cols + j] : 0.;
        }
    }
}
void remove_zero_pad(double *Y, int rows, int cols, int zrow, int zcol, double *Z) {
    /*
     * Inverse of add_zero_pad: copy the top-left (rows-zrow) x (cols-zcol)
     * corner of Y (rows x cols) into Z.
     */
    const int r = rows - zrow;
    const int c = cols - zcol;
    int i, j;
    for (i = 0; i < r; ++i) {
        for (j = 0; j < c; ++j) {
            Z[i * c + j] = Y[i * cols + j];
        }
    }
}
void madd_stride(double* A, double* B, double* C, int rows, int cols, int sA, int sB, int sC) {
    /* C = A + B on rows x cols blocks with independent row strides sA/sB/sC. */
    int i, j;
    for (i = 0; i < rows; ++i) {
        const double *ra = A + i * sA;
        const double *rb = B + i * sB;
        double *rc = C + i * sC;
        for (j = 0; j < cols; ++j) {
            rc[j] = ra[j] + rb[j];
        }
    }
}
void msub_stride(double* A, double* B, double* C, int rows, int cols, int sA, int sB, int sC) {
    /* C = A - B on rows x cols blocks with independent row strides sA/sB/sC. */
    int i, j;
    for (i = 0; i < rows; ++i) {
        const double *ra = A + i * sA;
        const double *rb = B + i * sB;
        double *rc = C + i * sC;
        for (j = 0; j < cols; ++j) {
            rc[j] = ra[j] - rb[j];
        }
    }
}
/*
 * Recursive strided matrix add: C = A + B on rows x cols blocks; p only
 * drives the CUTOFF test. Above CUTOFF all three sizes are halved and the
 * four quadrants recurse.
 * NOTE(review): the quadrant offsets advance A by sB and B/C by sC (not
 * sA/sB/sC respectively), unlike the base case and madd_stride — this looks
 * inconsistent; srecmult uses madd_stride instead, so this routine appears
 * unused. Confirm before relying on it.
 */
void rmadd_stride(double* A, double* B, double* C, int rows, int cols, int p, int sA, int sB, int sC) {
    int i, j, u, v, w;
    if (rows + cols + p <= CUTOFF) {
        /* Base case: plain strided add. */
        for (i = 0; i < rows; ++i) {
            u = i * sC;
            v = i * sA;
            w = i * sB;
            for (j = 0; j < cols; j++) {
                C[j + u] = A[j + v] + B[j + w];
            }
        }
    } else {
        rows /= 2; cols /= 2; p /= 2;
        rmadd_stride(A, B, C, rows, cols, p, sA, sB, sC);
        rmadd_stride(A + cols, B + cols, C + cols, rows, cols, p, sA, sB, sC);
        rmadd_stride(A + rows * sB, B + rows * sC, C + rows * sC, rows, cols, p, sA, sB, sC);
        rmadd_stride(A + rows * sB + cols, B + rows * sC + cols, C + rows * sC + cols, rows, cols, p, sA, sB, sC);
    }
}
/*
 * Recursive strided matrix subtract: C = A - B on rows x cols blocks; p
 * only drives the CUTOFF test. Mirror of rmadd_stride, including the
 * NOTE(review) there about the shifted sB/sC offsets in the recursion;
 * srecmult uses msub_stride instead, so this routine appears unused.
 */
void rmsub_stride(double* A, double* B, double* C, int rows, int cols, int p, int sA, int sB, int sC) {
    int i, j, u, v, w;
    if (rows + cols + p <= CUTOFF) {
        /* Base case: plain strided subtract. */
        for (i = 0; i < rows; ++i) {
            u = i * sC;
            v = i * sA;
            w = i * sB;
            for (j = 0; j < cols; j++) {
                C[j + u] = A[j + v] - B[j + w];
            }
        }
    } else {
        rows /= 2; cols /= 2; p /= 2;
        rmsub_stride(A, B, C, rows, cols, p, sA, sB, sC);
        rmsub_stride(A + cols, B + cols, C + cols, rows, cols, p, sA, sB, sC);
        rmsub_stride(A + rows * sB, B + rows * sC, C + rows * sC, rows, cols, p, sA, sB, sC);
        rmsub_stride(A + rows * sB + cols, B + rows * sC + cols, C + rows * sC + cols, rows, cols, p, sA, sB, sC);
    }
}
/*
 * Strassen recursive multiply: C = A * B where A is m x n (row stride sA),
 * B is n x p (stride sB) and C is m x p (stride sC). smult's padding
 * guarantees all dimensions halve cleanly. Below CUTOFF a naive product is
 * used; otherwise the seven Strassen products m1..m7 are combined per
 * http://en.wikipedia.org/wiki/Strassen_algorithm. m2/m5/m6/m7 are written
 * directly into quadrants of C; m1/m3/m4 use heap scratch.
 */
void srecmult(double* A, double* B, double* C, int m, int n, int p, int sA, int sB, int sC) {
    register int i, j, k;
    int u, v, t;
    double sum;
    double *A1, *B1;
    double *a11, *a12, *a21, *a22;
    double *b11, *b12, *b21, *b22;
    double *c11, *c12, *c21, *c22;
    double *m1, *m2, *m3, *m4, *m5, *m6, *m7;
    int sm1, sm2, sm3, sm4, sm5, sm6, sm7;
    int sA1, sB1;
    if (m + n + p <= CUTOFF) {
        /* Base case: naive product with a per-cell accumulator. */
        for (i = 0; i < m; ++i) {
            for (j = 0; j < p; ++j) {
                v = i * sA;
                u = i * sC;
                t = j + u;
                sum = 0.;
                for (k = 0; k < n; ++k) {
                    sum += A[k + v] * B[j + k * sB];
                }
                C[t] = sum;
            }
        }
    } else {
        m /= 2; n /= 2; p /= 2;
        /* Quadrant pointers. A is m x n, C is m x p. */
        a11 = A;
        a12 = A + n;
        a21 = A + m * sA;
        a22 = A + n + m * sA;
        /* B is n x p. */
        b11 = B;
        b12 = B + p;
        b21 = B + n * sB;
        b22 = B + p + n * sB;
        /* C is m x p. */
        c11 = C;
        c12 = C + p;
        c21 = C + m * sC;
        c22 = C + p + m * sC;
        /* Each Strassen product is m x p. */
        m1 = (double*) malloc(sizeof(double) * m * p);
        sm1 = p;
        m3 = (double*) malloc(sizeof(double) * m * p);
        sm3 = p;
        m4 = (double*) malloc(sizeof(double) * m * p);
        sm4 = p;
        m2 = c21;
        sm2 = sC;
        m5 = c12;
        sm5 = sC;
        m6 = c22;
        sm6 = sC;
        m7 = c11;
        sm7 = sC;
        /* m1 = (a11 + a22)(b11 + b22) */
        sA1 = n;
        sB1 = p;
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        madd_stride(a11, a22, A1, m, n, sA, sA, sA1);
        madd_stride(b11, b22, B1, n, p, sB, sB, sB1);
        srecmult(A1, B1, m1, m, n, p, sA1, sB1, sm1);
        free(A1);
        free(B1);
        /* m2 = (a21 + a22) b11 */
        A1 = (double*) malloc(sizeof(double) * m * n);
        madd_stride(a21, a22, A1, m, n, sA, sA, sA1);
        srecmult(A1, b11, m2, m, n, p, sA1, sB, sm2);
        free(A1);
        /* m3 = a11 (b12 - b22) */
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(b12, b22, B1, n, p, sB, sB, sB1);
        srecmult(a11, B1, m3, m, n, p, sA, sB1, sm3);
        free(B1);
        /* m4 = a22 (b21 - b11) */
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(b21, b11, B1, n, p, sB, sB, sB1);
        srecmult(a22, B1, m4, m, n, p, sA, sB1, sm4);
        free(B1);
        /* m5 = (a11 + a12) b22 */
        A1 = (double*) malloc(sizeof(double) * m * n);
        madd_stride(a11, a12, A1, m, n, sA, sA, sA1);
        srecmult(A1, b22, m5, m, n, p, sA1, sB, sm5);
        free(A1);
        /* m6 = (a21 - a11)(b11 + b12) */
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(a21, a11, A1, m, n, sA, sA, sA1);
        madd_stride(b11, b12, B1, n, p, sB, sB, sB1);
        srecmult(A1, B1, m6, m, n, p, sA1, sB1, sm6);
        free(A1);
        free(B1);
        /* m7 = (a12 - a22)(b21 + b22) */
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(a12, a22, A1, m, n, sA, sA, sA1);
        madd_stride(b21, b22, B1, n, p, sB, sB, sB1);
        srecmult(A1, B1, m7, m, n, p, sA1, sB1, sm7);
        free(A1);
        free(B1);
        /* c11 = m1 + m4 - m5 + m7 (the m7 buffer already aliases c11). */
        A1 = (double*) malloc(sizeof(double) * m * p);
        sA1 = p;
        madd_stride(m1, m7, m7, m, p, sm1, sm7, sm7);
        msub_stride(m4, m5, A1, m, p, sm4, sm5, sA1);
        madd_stride(m7, A1, m7, m, p, sm7, sA1, sm7);
        free(A1);
        /* c22 = m1 - m2 + m3 + m6 (the m6 buffer aliases c22). */
        A1 = (double*) malloc(sizeof(double) * m * p);
        sA1 = p;
        madd_stride(m1, m6, m6, m, p, sm1, sm6, sm6);
        msub_stride(m3, m2, A1, m, p, sm3, sm2, sA1);
        madd_stride(m6, A1, m6, m, p, sm6, sA1, sm6);
        free(A1);
        /* c12 = m3 + m5 */
        madd_stride(m3, m5, m5, m, p, sm3, sm5, sm5);
        /* c21 = m2 + m4 */
        madd_stride(m4, m2, m2, m, p, sm4, sm2, sm2);
        free(m1);
        free(m3);
        free(m4);
    }
}
void smult(double* A, double* B, double* C, int m, int n, int p) {
    /*
     * Strassen multiply C = A * B (A: m x n, B: n x p, C: m x p).
     * Inputs are zero-padded to dimensions that halve cleanly down to the
     * CUTOFF base case, multiplied with srecmult, then un-padded into C.
     * BUG FIX: removed a scratch buffer (P) that was allocated and freed
     * without ever being used, dropped the unused findrec() return value,
     * and added allocation checks.
     */
    int a, b, c;
    double *X, *Y, *Z;
    a = m;
    b = n;
    c = p;
    findrec(&a, &b, &c); /* padded sizes: A -> a x b, B -> b x c */
    X = (double*) malloc(sizeof(double) * a * b);
    Y = (double*) malloc(sizeof(double) * b * c);
    Z = (double*) malloc(sizeof(double) * a * c);
    if (X == NULL || Y == NULL || Z == NULL) {
        printf("smult: out of memory");
        exit(1);
    }
    add_zero_pad(A, m, n, a - m, b - n, X);
    add_zero_pad(B, n, p, b - n, c - p, Y);
    srecmult(X, Y, Z, a, b, c, b, c, c);
    remove_zero_pad(Z, a, c, a - m, c - p, C);
    free(X);
    free(Y);
    free(Z);
}
void mmult(double* A, double* B, double* C, int m, int n, int p) {
    /* Dispatch: Strassen path for big products, naive kernel for small ones. */
    if (m + n + p > CUTOFF / 2) {
        smult(A, B, C, m, n, p);
    } else {
        nmult(A, B, C, m, n, p);
    }
}
static int pludecomp(double *A, int N, int *ipiv) {
    /*
     * In-place LU decomposition with partial pivoting. On exit A holds the
     * strictly-lower L factors (unit diagonal implied) and U on/above the
     * diagonal; ipiv records the row permutation. Returns 0.
     */
    int k, j, l, c1, c2, mind, tempi;
    double ld, mult, mval, temp;
    for (k = 0; k < N; ++k)
        ipiv[k] = k;
    for (k = 0; k < N - 1; ++k) {
        /* Pivot search: largest |A[j][k]| at or below row k. */
        mval = fabs(A[k * N + k]);
        mind = k;
        for (j = k + 1; j < N; ++j) {
            if (mval < fabs(A[j * N + k])) {
                /* BUG FIX: keep the magnitude, not the signed entry — a
                 * negative candidate used to poison later comparisons and
                 * break the partial-pivot selection. */
                mval = fabs(A[j * N + k]);
                mind = j;
            }
        }
        if (mind != k) {
            /* Swap rows k and mind, and record it in ipiv. */
            c1 = k * N;
            c2 = mind * N;
            tempi = ipiv[mind];
            ipiv[mind] = ipiv[k];
            ipiv[k] = tempi;
            for (j = 0; j < N; j++) {
                temp = A[c1 + j];
                *(A + c1 + j) = *(A + c2 + j);
                *(A + c2 + j) = temp;
            }
        }
        /* Gaussian elimination below the pivot; multipliers stored in place. */
        c2 = k * N;
        ld = A[c2 + k];
        if (ld != 0.) {
            for (j = k + 1; j < N; ++j) {
                c1 = j * N;
                mult = A[c1 + k] /= ld;
                for (l = k + 1; l < N; ++l) {
                    A[c1 + l] -= mult * A[c2 + l];
                }
            }
        }
    }
    return 0;
}
void ludecomp(double *A,int N,int *ipiv) {
pludecomp(A,N,ipiv);
}
void linsolve(double *A,int N,double *b,int *ipiv,double *x) {
int i,j,c1,l;
double *y;
double sum;
y = (double*) malloc(sizeof(double) *N);
/*
* Two step Solution L * U * x = b
* Let U*x = y
* Solve L * y = b for y (Forward Substitution
* Solve U * x = b for x (Back Substitution)
*/
for(i = 0; i < N;++i) {
y[i] = 0.;
x[i] = 0.;
if ( A[i*N + i] == 0.) {
printf("The Matrix system does not have a unique solution");
exit(1);
}
//printf("\n B %d",ipiv[i]);
}
// Forward Substitution
y[0] = b[ipiv[0]];
for(i = 1; i < N; ++i) {
sum = 0.;
c1 = i*N;
for(j = 0; j < i; ++j) {
sum += y[j] * A[c1 + j];
}
y[i] = b[ipiv[i]] - sum;
}
// Back Substitution
x[N - 1] = y[N - 1]/A[N * N - 1];
for (i = N - 2; i >= 0; i--) {
sum = 0.;
c1 = i*(N+1);
l=0;
for(j = i+1; j < N;j++) {
l++;
sum += A[c1 + l] * x[j];
}
x[i] = (y[i] - sum) / A[c1];
}
free(y);
}
void minverse(double *A, int N, int *ipiv, double *inv) {
    /*
     * Invert an LU-factored matrix column by column: solve A x = e_i for
     * each unit vector e_i and scatter x into column i of inv (N x N).
     */
    int i, j, idx;
    double *e = (double*) malloc(sizeof(double) * N);
    double *x = (double*) malloc(sizeof(double) * N);
    for (i = 0; i < N; ++i) {
        e[i] = 0.;
        x[i] = 0.;
    }
    for (i = 0; i < N; ++i) {
        e[i] = 1.;
        linsolve(A, N, e, ipiv, x);
        /* Column i of inv has stride N. */
        for (j = 0, idx = i; j < N; ++j, idx += N) {
            inv[idx] = x[j];
        }
        e[i] = 0.;
    }
    free(x);
    free(e);
}
void eye(double *mat, int N) {
    /* Write the N x N identity matrix into mat (row-major). */
    int r, c;
    for (r = 0; r < N; ++r) {
        for (c = 0; c < N; ++c) {
            mat[r * N + c] = (r == c) ? 1. : 0.;
        }
    }
}
static double house_1(double*x,int N,double *v) {
double beta,mu,temp;
double *sigma;
int i;
sigma = (double*) malloc(sizeof(double) * 1);
if (N > 1) {
mmult(x+1,x+1,sigma,1,N-1,1);
} else {
sigma[0] = 0.0;
}
v[0] =1.;
for (i = 1; i < N;++i) {
v[i] = x[i];
}
if (sigma[0] == 0. && x[0] >= 0.) {
beta = 0.;
} else if (sigma[0] == 0. && x[0] < 0.) {
beta = -2.;
}else {
mu = sqrt(sigma[0] + x[0] * x[0]);
if (x[0] <= 0.) {
v[0] = x[0] - mu;
} else {
v[0] = - sigma[0] / (x[0] + mu);
}
temp = v[0];
beta = (2.0 * v[0] * v[0]) /(sigma[0] + v[0] * v[0]);
for (i = 0; i < N;++i) {
v[i] /= temp;
}
}
free(sigma);
return beta;
}
double house_2(double*x,int N,double *v) {
double sgn,beta,sc;
double *sigma,*e;
int i;
sigma = (double*) malloc(sizeof(double) * 1);
e = (double*) malloc(sizeof(double) * N);
beta = 2.0;
sgn = 1.0;
mmult(x,x,sigma,1,N,1);
sigma[0] = sqrt(sigma[0]);
e[0] =1.;
for (i = 1; i < N;++i) {
e[i] = 0.;
}
if (x[0] > 0.) {
sgn = 1.0;
} else if (x[0] < 0.) {
sgn = -1.0;
} else if (x[0] == 0.) {
sgn = 0.;
}
sc = sigma[0] * sgn;
//scale(e,N,1,sc);
e[0] *= sc;
for(i = 0; i < N;++i) {
v[i] = e[i] + x[i];
}
mmult(v,v,sigma,1,N,1);
sigma[0] = sqrt(sigma[0]);
for(i = 0; i < N;++i) {
v[i] = v[i] / sigma[0];
}
free(sigma);
free(e);
return beta;
}
double house(double*x,int N,double *v) {
double beta;
beta = house_1(x,N,v);
return beta;
}
void housemat(double *v, int N, double beta, double *mat) {
    /* mat = I - beta * v * v^T: the Householder reflector as an explicit matrix. */
    double *outer = (double*) malloc(sizeof(double) * N * N);
    eye(mat, N);
    mmult(v, v, outer, N, 1, N);   /* outer = v v^T */
    scale(outer, N, N, beta);
    msub(mat, outer, mat, N, N);
    free(outer);
}
void qrdecomp(double *A, int M, int N,double *bvec) {
int j,i,k,u,t;
double *x,*v,*AT,*w;
double beta;
if (M < N) {
printf("M should be greater than or equal to N");
exit(1);
}
x = (double*) malloc(sizeof(double) * M);
v = (double*) malloc(sizeof(double) * M);
AT = (double*) malloc(sizeof(double) * M * N);
w = (double*) malloc(sizeof(double) * M * M);
for(j = 0; j < N;++j) {
for(i=j;i < M;++i) {
x[i-j] = A[i*N+j];
}
beta = house(x,M-j,v);
bvec[j] = beta;
for (i=j; i < M; i++) {
t = i * N;
u = 0;
for (k=j; k < N; k++) {
AT[u+i-j] = A[k+t];
u+=(M-j);
}
}
mmult(AT,v,w,N-j,M-j,1);
scale(w,N-j,1,beta);
mmult(v,w,AT,M-j,1,N-j);
for (i=j; i < M; i++) {
t = i *N;
for (k=j; k < N; k++) {
A[t+k] -= AT[(i-j)*(N-j) + k - j];
}
}
if (j < M) {
for(i=j+1;i < M;++i) {
A[i*N+j] = v[i-j];
}
}
}
free(x);
free(v);
free(AT);
free(w);
}
void getQR(double *A, int M, int N, double *bvec, double *Q, double *R) {
    /*
     * Recover explicit Q (M x N) and R (N x N) from qrdecomp's packed
     * output: A holds R in its upper triangle and the Householder vectors
     * below the diagonal; bvec holds the betas. Q is built by applying the
     * reflectors in reverse order (backward accumulation).
     * BUG FIX: removed a scratch buffer (x) that was allocated and freed
     * without ever being read or written.
     */
    int i, j, k, t, u;
    double *v, *AT, *w;
    v = (double*) malloc(sizeof(double) * M);
    AT = (double*) malloc(sizeof(double) * M * N);
    w = (double*) malloc(sizeof(double) * M * M);
    /* R = upper triangle of A. */
    for (i = 0; i < N; ++i) {
        t = i * N;
        for (j = 0; j < N; ++j) {
            if (i > j) {
                R[t + j] = 0.;
            } else {
                R[t + j] = A[t + j];
            }
        }
    }
    /* Q starts as the leading M x N block of the identity. */
    for (i = 0; i < M; ++i) {
        t = i * N;
        for (j = 0; j < N; ++j) {
            Q[t + j] = (i == j) ? 1. : 0.;
        }
    }
    for (j = N - 1; j >= 0; --j) {
        /* Rebuild the j-th Householder vector (leading 1 is implicit). */
        v[0] = 1.;
        for (i = j + 1; i < M; ++i) {
            v[i - j] = A[i * N + j];
        }
        /* AT = transpose of the trailing block Q[j:, j:]. */
        for (i = j; i < M; i++) {
            t = i * N;
            u = 0;
            for (k = j; k < N; k++) {
                AT[u + i - j] = Q[k + t];
                u += (M - j);
            }
        }
        /* Q[j:, j:] -= bvec[j] v (v^T Q[j:, j:]). */
        mmult(AT, v, w, N - j, M - j, 1);
        scale(w, N - j, 1, bvec[j]);
        mmult(v, w, AT, M - j, 1, N - j);
        for (i = j; i < M; i++) {
            t = i * N;
            for (k = j; k < N; k++) {
                Q[t + k] -= AT[(i - j) * (N - j) + k - j];
            }
        }
    }
    free(v);
    free(AT);
    free(w);
}
void hessenberg(double *A,int N) {
int k,i,j,t,u;
double *x,*v,*AT,*w;
double beta;
x = (double*) malloc(sizeof(double) * N);
v = (double*) malloc(sizeof(double) * N);
AT = (double*) malloc(sizeof(double) * N * N);
w = (double*) malloc(sizeof(double) * N);
for (k = 0; k < N-2;++k) {
for(i=k + 1;i < N;++i) {
x[i-k-1] = A[i*N+k];
//printf("x %lf \n",x[i-k-1]);
}
beta = house(x,N-k-1,v);
for (i=k+1; i < N; i++) {
t = i * N;
u = 0;
for (j=k; j < N; j++) {
AT[u+i-k-1] = A[j+t];
u+=(N-k-1);
}
}
//mdisplay(AT,N-k,N-k-1);
mmult(AT,v,w,N-k,N-k-1,1);
scale(w,N-k,1,beta);
mmult(v,w,AT,N-k-1,1,N-k);
//mdisplay(AT,N-k-1,N-k);
for (i=k+1; i < N; i++) {
t = i * N;
for (j=k; j < N; j++) {
A[t+j] -= AT[(i-k-1)*(N-k) + j - k];
}
}
//mdisplay(A,N,N);
for (i=0; i < N; i++) {
t = i * N;
u = i * (N-k-1);
for (j=k+1; j < N; j++) {
AT[u+j-k-1] = A[t+j];
}
}
//mdisplay(AT,N,N-k-1);
mmult(AT,v,w,N,N-k-1,1);
scale(w,N,1,beta);
mmult(w,v,AT,N,1,N-k-1);
//mdisplay(AT,N,N-k-1);
for (i=0; i < N; i++) {
t = i * N;
u = i * (N-k-1);
for (j=k+1; j < N; j++) {
A[t+j] -= AT[u+j-k-1];
}
}
}
free(x);
free(v);
free(AT);
free(w);
}
void francisQR(double *A, int N) {
    /*
     * One implicit double-shift (Francis) QR step on an unreduced upper
     * Hessenberg matrix A (N x N), in place.
     * Reference: Algorithm 7.5.1, Golub & van Loan, Matrix Computations 3rd ed.
     * The 3x3 (and final 2x2) bulge-chasing Householder reflectors are
     * applied from both sides as rank-1 updates via mmult/scale.
     */
    int m, n, k, q, r, t, u, i, j;
    double s, t2, beta;
    double *x, *v, *AT, *w;
    int NN;
    /*
     * BUG FIX: bail out before any allocation or matrix access. The old
     * early return for N <= 2 sat after four mallocs (leaking all of them)
     * and after x[2] = A[N]*A[N+N+1], which reads past the end of a 2x2 A.
     */
    if (N <= 2) {
        return;
    }
    x = (double*) malloc(sizeof(double) * 3);
    v = (double*) malloc(sizeof(double) * 3);
    AT = (double*) malloc(sizeof(double) * 3 * N);
    w = (double*) malloc(sizeof(double) * N);
    n = N - 1;
    m = n - 1;
    NN = N * N;
    /* Trailing 2x2 trace (s) and determinant (t2) give the double shift. */
    s = A[NN - 1] + A[NN - N - 2];
    t2 = A[NN - 1] * A[NN - N - 2] - A[NN - 2] * A[NN - N - 1];
    /* First column of (A - s1 I)(A - s2 I): only three nonzeros. */
    x[0] = A[0] * A[0] + A[1] * A[N] - s * A[0] + t2;
    x[1] = A[N] * (A[0] + A[N + 1] - s);
    x[2] = A[N] * A[N + N + 1];
    for (k = -1; k < N - 3; ++k) {
        /* Chase the bulge with a 3x3 reflector on rows k+1..k+3. */
        beta = house(x, 3, v);
        if (k > 0) {
            q = k;
        } else {
            q = 0;
        }
        /* AT = transpose of rows k+1..k+3, columns q and beyond. */
        for (i = k + 1; i < k + 4; i++) {
            t = i * N;
            u = 0;
            for (j = q; j < N; j++) {
                AT[u + i - k - 1] = A[j + t];
                u += 3;
            }
        }
        /* Left update: A(k+1:k+3, q:) -= beta v (v^T A). */
        mmult(AT, v, w, N - q, 3, 1);
        scale(w, N - q, 1, beta);
        mmult(v, w, AT, 3, 1, N - q);
        for (i = k + 1; i < k + 4; i++) {
            t = i * N;
            for (j = q; j < N; j++) {
                A[t + j] -= AT[(i - k - 1) * (N - q) + j - q];
            }
        }
        if (k + 4 >= n) {
            r = N;
        } else {
            r = k + 4 + 1;
        }
        /* Right update: A(0:r, k+1:k+3) -= (A v) beta v^T. */
        for (i = 0; i < r; i++) {
            t = i * N;
            u = i * 3;
            for (j = k + 1; j < k + 4; j++) {
                AT[u + j - k - 1] = A[t + j];
            }
        }
        mmult(AT, v, w, r, 3, 1);
        scale(w, r, 1, beta);
        mmult(w, v, AT, r, 1, 3);
        for (i = 0; i < r; i++) {
            t = i * N;
            u = i * 3;
            for (j = k + 1; j < k + 4; j++) {
                A[t + j] -= AT[u + j - k - 1];
            }
        }
        /* Column of the bulge for the next step. */
        x[0] = A[N * (k + 2) + k + 1];
        x[1] = A[N * (k + 3) + k + 1];
        if (k < n - 3) {
            x[2] = A[N * (k + 4) + k + 1];
        }
    }
    /* Final 2x2 reflector acting on rows n-1 and n. */
    beta = house(x, 2, v);
    for (i = n - 1; i < N; i++) {
        t = i * N;
        u = 0;
        for (j = n - 2; j < N; j++) {
            AT[u + i - n + 1] = A[j + t];
            u += 2;
        }
    }
    mmult(AT, v, w, 3, 2, 1);
    scale(w, 3, 1, beta);
    mmult(v, w, AT, 2, 1, 3);
    for (i = n - 1; i < N; i++) {
        t = i * N;
        for (j = n - 2; j < N; j++) {
            A[t + j] -= AT[(i - n + 1) * 3 + j - n + 2];
        }
    }
    for (i = 0; i < N; i++) {
        t = i * N;
        u = i * 2;
        for (j = n - 1; j < N; j++) {
            AT[u + j - n + 1] = A[t + j];
        }
    }
    mmult(AT, v, w, N, 2, 1);
    scale(w, N, 1, beta);
    mmult(w, v, AT, N, 1, 2);
    for (i = 0; i < N; i++) {
        t = i * N;
        u = i * 2;
        for (j = n - 1; j < N; j++) {
            A[t + j] -= AT[u + j - n + 1];
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
void eig22(double *A, int stride,double *eigre,double *eigim) {
int N;
double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,t2,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
if ( (a12 + a21) == 0) {
c = 1./sqrt(2.0);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1. + sqrt(1+t1*t1));
c = 1./sqrt(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
eigre[0] = eigre[1] = at11;
eigim[0] = sqrt(-at12 * at21);
eigim[1] = -sqrt(-at12 * at21);
if ( at12*at21 >= 0) {
if (at12 == 0) {
c = 0;
s = 1;
c2 = 0;
s2 = 1;
cs = 0;
} else {
t = sqrt(at21/at12);
t2 = t * t;
cs = t/(1+t2);
c2 = (1+t2);
s2 = t2 /(1+t2);
}
eigim[0] = eigim[1] = 0.0;
eigre[0] = at11 - cs * (at12 + at21);
eigre[1] = at11 + cs * (at12 + at21);
}
}
/*
 * Drive Francis QR steps to convergence: H receives a quasi-triangular
 * (real Schur-like) form of A (N x N); A is copied, not modified.
 * Returns 1 on convergence, 0 if the 30*N iteration budget runs out.
 * Deflation: subdiagonals negligible relative to TOL are zeroed and the
 * active window [q, p] shrinks accordingly.
 */
int francis_iter(double *A, int N, double *H) {
    int success, brkpoint;
    int i, j, it, p, q, t, u;
    double *temp;
    success = 0;
    brkpoint = 30 * N; /* conventional iteration cap */
    it = 0;
    p = N - 1;
    temp = (double*) malloc(sizeof(double) * N * N);
    for (i = 0; i < N * N; ++i) {
        H[i] = A[i];
    }
    hessenberg(H, N);
    while (p > 1 && it < brkpoint) {
        /* Deflate converged trailing 1x1 / 2x2 blocks. */
        while (p > 1 && (H[N * p + p - 1] == 0 || H[N * (p - 1) + p - 2] == 0)) {
            if (H[N * p + p - 1] == 0) {
                p--;
            } else if (H[N * (p - 1) + p - 2] == 0) {
                p = p - 2;
            }
        }
        if (p > 0) {
            /* Find the largest unreduced window [q, p]. */
            q = p - 1;
            while (q > 0 && fabs(H[N * q + q - 1]) != 0) {
                q--;
            }
            /* Copy the window out, run one Francis step, copy it back. */
            for (i = q; i <= p; i++) {
                t = i * N;
                u = (i - q) * (p - q + 1);
                for (j = q; j <= p; j++) {
                    temp[u + j - q] = H[t + j];
                }
            }
            francisQR(temp, p - q + 1);
            for (i = q; i <= p; i++) {
                t = i * N;
                u = (i - q) * (p - q + 1);
                for (j = q; j <= p; j++) {
                    H[t + j] = temp[u + j - q];
                }
            }
            /* Zero subdiagonals negligible w.r.t. their diagonal neighbours. */
            for (i = q; i <= p - 1; ++i) {
                if (fabs(H[(i + 1) * N + i]) <= TOL * (fabs(H[i * N + i]) + fabs(H[(i + 1) * N + i + 1]))) {
                    H[(i + 1) * N + i] = 0.;
                }
            }
            it++;
        }
    }
    if (it == brkpoint) {
        success = 0;
    } else {
        success = 1;
    }
    free(temp);
    return success;
}
static void eig2t(double *A, int stride) {
int N;
double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
if ( (a12 + a21) == 0) {
c = 1./sqrt(2.0);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1. + sqrt(1+t1*t1));
c = 1./sqrt(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
A[0] = at11;
A[1] = at12;
A[N] = at21;
A[N+1] = at22;
}
/* Computes all eigenvalues of the real N x N row-major matrix A.
 * Real parts go to eigre[0..N-1], imaginary parts to eigim[0..N-1].
 * Pipeline: francis_iter produces the real Schur form H (1x1/2x2
 * diagonal blocks); eig2t rotates each 2x2 block so its diagonal
 * entries agree; eigenvalues are then read off the blocks.
 * NOTE(review): the return value of francis_iter is ignored, so a
 * non-converged iteration goes undetected here. */
void eig(double *A,int N,double *eigre,double *eigim) {
	int i,t,u,n;
	double *H;
	double t1,t2,cs;
	H = (double*) malloc(sizeof(double) * N * N);
	n = N - 1;
	francis_iter(A,N,H);
	//mdisplay(H,N,N);
	/* First pass: equalise the diagonal of every 2x2 block.
	 * A nonzero subdiagonal entry H[(i+1)*N+i] marks a 2x2 block. */
	i = 0;
	while (i < n) {
		u = i * N;
		t = (i+1)*N;
		if (H[t+i] != 0.) {
			eig2t(H+u+i,N);
			i = i +2;
		} else {
			i++;
		}
	}
	//mdisplay(H,N,N);
	/* Second pass: extract the eigenvalues block by block. */
	i = 0;
	while (i < n) {
		u = i * N;
		t = (i+1)*N;
		if (H[t+i] != 0.) {
			if (H[u+i+1] * H[t+i] < 0.) {
				/* Off-diagonal product negative: complex pair. */
				eigre[i] = H[u+i];
				eigre[i+1] = H[t+i+1];
				eigim[i] = sqrt(-H[u+i+1] * H[t+i]);
				eigim[i+1] = -sqrt(-H[u+i+1] * H[t+i]);
			} else {
				/* 2x2 block with two real eigenvalues. */
				if (H[u+i+1] == 0.) {
					cs = 0.;
				} else {
					t1 = sqrt(H[t+i]/H[u+i+1]);
					t2 = t1 * t1;
					cs = t1/(1+t2);
				}
				eigre[i] = H[u+i] - cs * (H[u+i+1] + H[t+i]);
				eigre[i+1] = H[u+i] + cs * (H[u+i+1] + H[t+i]);
				eigim[i] = 0.;
				eigim[i+1] = 0.;
			}
			i= i + 2;
		} else {
			/* 1x1 block: real eigenvalue on the diagonal. */
			eigre[i] = H[u+i];
			eigim[i] = 0.;
			i++;
		}
	}
	/* Trailing 1x1 block when the last pair boundary fell at N-1. */
	if (i == n) {
		eigre[i] = H[N*N - 1];
		eigim[i] = 0.;
	}
	free(H);
}
/* Recursive unblocked Cholesky: factors the leading N x N block of the
 * row-major matrix A (leading dimension `stride`) as A = U^T * U,
 * storing U in the upper triangle in place. U22 is caller-provided
 * scratch of at least (N-1)*(N-1) doubles. Returns 0 on success, -1 if
 * a non-positive pivot shows A is not positive definite. */
static int rcholu(double *A,int N, int stride, double *U22) {
	int sc;
	int j,i,u,w;
	double u11;
	if (N == 1) {
		/* Base case: 1x1 factorization. */
		if (A[0] > 0) {
			A[0] = sqrt(A[0]);
			return 0;
		} else {
			return -1;
		}
	} else {
		if (A[0] < 0) {
			return -1;
		}
		/* u11 = sqrt(a11); first row of U becomes a12 / u11. */
		u11 = sqrt(A[0]);
		A[0] = u11;
		for (j = 1; j < N;++j) {
			A[j] /= u11;
		}
		/* Rank-1 update of the trailing block:
		 * A22 -= u12^T * u12 (upper triangle only). */
		mmult(A+1,A+1,U22,N-1,1,N-1);
		for (i = 0; i < N-1; ++i) {
			u = stride + 1+ i * stride;
			w = i * (N-1);
			for(j = i; j < N-1;j++) {
				A[j + u] -= U22[j + w];
			}
		}
		/* Recurse on the updated trailing (N-1) x (N-1) block. */
		sc = rcholu(A+stride+1,N-1,stride,U22);
		if (sc == -1) {
			return -1;
		}
	}
	return sc;
}
/* Recursive blocked Cholesky: factors the leading N x N block of A
 * (row-major, leading dimension `stride`) as A = U^T * U in place.
 * UB is scratch for the unblocked kernel (>= BLOCKSIZE^2 doubles); UT
 * is general scratch (>= N*N doubles). Returns 0 on success, -1 when a
 * diagonal block turns out not positive definite.
 * Per step: factor the leading bs x bs block U11, solve
 * U11^T * U12 = A12 by forward substitution, update the trailing block
 * A22 -= U12^T * U12, then recurse on A22. */
static int rbcholu(double *A,int N, int stride, double *UB, double *UT) {
	int bs,bb,i,j,Nb,t,k,u,v,w,sc;
	double *b,*x,*U12,*U12T;
	double sum;
	bs = (int) BLOCKSIZE;
	bb = bs*bs;
	if (N <= BLOCKSIZE) {
		/* Small enough: fall back to the unblocked kernel. */
		sc = rcholu(A,N,stride,UB);
		if (sc == -1) {
			return -1;
		}
	} else {
		Nb = N - bs;
		x = (double*) malloc(sizeof(double) * bs);
		b = (double*) malloc(sizeof(double) * bs);
		U12T = (double*) malloc(sizeof(double) * Nb * bs);
		U12 = (double*) malloc(sizeof(double) * Nb * bs);
		/* NOTE(review): return value ignored here — a non-PD leading
		 * block is only caught at a later recursion level, if at all. */
		rcholu(A,bs,stride,UB); // U11
		/* Transpose the first bs rows of A into UT: UT[0..bb) holds
		 * U11^T, UT[bb..) holds A12^T, both in bs-column layout. */
		for (i =0; i < bs;++i) {
			t = i *stride;
			u = 0;
			for(j = 0; j < N;++j) {
				UT[u+i] = A[j+t];
				u += bs;
			}
		}
		/* Forward-substitute U11^T * x = a12 one column at a time; the
		 * solved column of U12 is written back into A and kept in U12T. */
		for(k = 0; k < Nb;++k) {
			u = k * bs;
			for(i = 0; i < bs;++i) {
				b[i] = UT[bb+u+i];
				x[i] = 0.;
			}
			for (i = 0; i < bs;++i) {
				t = i*bs;
				sum = 0;
				for (j = 0; j < i;++j) {
					sum += UT[t+j] * x[j];
				}
				x[i] = (b[i] - sum) / UT[t+i];
			}
			v = bs + k;
			for(i = 0; i < bs;++i) {
				A[v] = x[i];
				U12T[u+i] = x[i];
				v += stride;
			}
		}
		/* Trailing update: A22 -= U12^T * U12 (product goes via UT). */
		mtranspose(U12T,Nb,bs,U12);
		mmult(U12T,U12,UT,Nb,bs,Nb);
		free(U12T);
		free(U12);
		free(b);
		free(x);
		/* Subtract the product from the upper triangle of A22. */
		for (i = 0; i < Nb; ++i) {
			u = bs * stride + bs + i * stride;
			w = i * Nb;
			for(j = i; j < Nb;j++) {
				A[j + u] -= UT[j + w];
			}
		}
		/* Recurse on the updated trailing Nb x Nb block. */
		sc = rbcholu(A + bs * stride + bs,Nb,stride,UB,UT);
		if (sc == -1) {
			return -1;
		}
	}
	return sc;
}
/* Unblocked Cholesky factorization A = U^T * U of the N x N row-major
 * matrix A, in place. On return the upper triangle holds U and the
 * strict lower triangle is zeroed. Returns 0 on success, -1 if A is
 * not positive definite (lower triangle is zeroed either way). */
int cholu(double *A, int N) {
	int row, col, status;
	double *scratch;

	scratch = (double*) malloc(sizeof(double) * N * N);
	status = rcholu(A, N, N, scratch);

	/* Clear everything below the diagonal. */
	for (row = 0; row < N; ++row) {
		for (col = 0; col < row; ++col) {
			A[row * N + col] = 0.;
		}
	}

	free(scratch);
	return status;
}
/* Blocked Cholesky factorization A = U^T * U of the N x N row-major
 * matrix A, in place (block size BLOCKSIZE). On return the upper
 * triangle holds U and the strict lower triangle is zeroed.
 * Returns 0 on success, -1 if A is not positive definite. */
int bcholu(double *A, int N) {
	const int blk = (int) BLOCKSIZE;
	int row, col, status;
	double *scratchN, *scratchB;

	scratchN = (double*) malloc(sizeof(double) * N * N);
	scratchB = (double*) malloc(sizeof(double) * blk * blk);
	status = rbcholu(A, N, N, scratchB, scratchN);

	/* Clear everything below the diagonal. */
	for (row = 0; row < N; ++row) {
		for (col = 0; col < row; ++col) {
			A[row * N + col] = 0.;
		}
	}

	free(scratchB);
	free(scratchN);
	return status;
}
/* Cholesky factorization dispatcher: uses the unblocked kernel for
 * small matrices and the blocked one otherwise. Same contract as
 * cholu/bcholu: 0 on success, -1 if A is not positive definite. */
int chol(double *A, int N) {
	return (N <= (int) BLOCKSIZE) ? cholu(A, N) : bcholu(A, N);
}
/* Recursive square-root-free (LDL^T-style) factorization of the leading
 * N x N block of A (row-major, leading dimension `stride`), in place:
 * the diagonal keeps the D entries and each row above the diagonal is
 * scaled by its pivot. U22 is caller scratch (>= (N-1)^2 doubles).
 * NOTE(review): no zero-pivot check — a zero diagonal entry causes a
 * division by zero. */
static void rchold(double *A,int N, int stride, double *U22) {
	int j,i,u,w;
	double d1;
	if (N == 1) {
		/* Base case: nothing to do for a 1x1 block. */
		return;
	} else {
		d1 = A[0];
		/* Scale the first row (past the pivot) by the pivot. */
		for (j = 1; j < N;++j) {
			A[j] /= d1;
		}
		/* Trailing update: A22 -= d1 * u12^T * u12 (upper triangle). */
		mmult(A+1,A+1,U22,N-1,1,N-1);
		scale(U22,N-1,N-1,d1);
		for (i = 0; i < N-1; ++i) {
			u = stride + 1+ i * stride;
			w = i * (N-1);
			for(j = i; j < N-1;j++) {
				A[j + u] -= U22[j + w];
			}
		}
		/* Recurse on the updated trailing block. */
		rchold(A+stride+1,N-1,stride,U22);
	}
}
/* Square-root-free Cholesky (LDL^T-style) of the N x N row-major
 * matrix A, in place; afterwards the strict lower triangle is zeroed.
 * Thin wrapper around the recursive kernel rchold. */
void chold(double *A, int N) {
	int row, col;
	double *scratch;

	scratch = (double*) malloc(sizeof(double) * N * N);
	rchold(A, N, N, scratch);

	/* Clear everything below the diagonal. */
	for (row = 0; row < N; ++row) {
		for (col = 0; col < row; ++col) {
			A[row * N + col] = 0.;
		}
	}

	free(scratch);
}
/* Sorts the singular values q (and permutes the columns of U and V to
 * match) using the permutation reported by sort1d. */
void svd_sort(double *U,int M,int N,double *V,double *q) {
	/*
	 * Pavel Sakov's CSA SVD sort routine is used with some minor
	 * modifications. See The License below
	 */
	/*
	 * Copyright (C) 2000-2008 Pavel Sakov and CSIRO
	Redistribution and use of material from the package `csa', with or without
	modification, are permitted provided that the following conditions are
	met:
	1. Redistributions of material must retain the above copyright notice, this
	list of conditions and the following disclaimer.
	2. The names of the authors may not be used to endorse or promote products
	derived from this software without specific prior written permission.
	THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
	WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
	MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
	EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
	EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
	OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
	INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
	CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
	IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
	OF SUCH DAMAGE.
	*/
	int col, idx;
	double *Ubak, *Vbak, *qbak;
	int *order;

	Ubak = (double*) malloc(sizeof(double) * N * M);
	Vbak = (double*) malloc(sizeof(double) * N * N);
	qbak = (double*) malloc(sizeof(double) * N);
	order = (int*) malloc(sizeof(int) * N);

	/* Snapshot the inputs before permuting them in place. */
	for (col = 0; col < N; ++col) {
		qbak[col] = q[col];
	}
	for (idx = 0; idx < M*N; ++idx) {
		Ubak[idx] = U[idx];
	}
	for (idx = 0; idx < N*N; ++idx) {
		Vbak[idx] = V[idx];
	}

	/* sort1d sorts q and reports the source index of each slot. */
	sort1d(q,N,order);

	/* Apply the same column permutation to U and V. */
	for (col = 0; col < N; ++col) {
		q[col] = qbak[order[col]];
		for (idx = 0; idx < M; ++idx) {
			U[idx*N + col] = Ubak[idx*N + order[col]];
		}
		for (idx = 0; idx < N; ++idx) {
			V[idx*N + col] = Vbak[idx*N + order[col]];
		}
	}

	free(Ubak);
	free(Vbak);
	free(qbak);
	free(order);
}
/* Singular value decomposition A = U * diag(q) * V^T of an M x N
 * row-major matrix A (requires M >= N). U is M x N, V is N x N, q is
 * the vector of N singular values (sorted by svd_sort on return).
 * Fixes vs. previous version: the workspace `e` is no longer leaked on
 * the non-convergence path; the dead commented-out duplicate of the
 * bidiagonalization has been removed; the error message said "(B)"
 * where "(N)" was meant. */
int svd(double *A,int M,int N,double *U,double *V,double *q) {
	int i,j,k,l,t,t2,ierr,cancel,iter,l1;
	double eps,g,x,s,temp,f,h,c,y,z,scale;
	double *e;
	/*
	THIS SUBROUTINE IS THE MODIFIED C TRANSLATION OF THE
	EISPACK FORTRAN TRANSLATION OF THE ALGOL PROCEDURE SVD,
	NUM. MATH. 14, 403-420(1970) BY GOLUB AND REINSCH.
	HANDBOOK FOR AUTO. COMP., VOL II-LINEAR ALGEBRA, 134-151(1971).
	*/
	/*
	 * U = MXN
	 * V - NXN
	 * Q - NX1
	 */
	/*
	 * The program return error codes
	 *
	 * Code 0 if the computation is successful
	 * Code -1 If M < N . Transpose the matrix such that rows > columns and try again
	 * Code 15 if maximum iterations are reached without achieving convergence. Increase SVDMAXITER value
	 * in matrix.h header file. Default Value is 50
	 *
	 */
	if (M < N) {
		printf("Rows (M) should be greater than Columns (N) \n");
		printf("Retry By Transposing the Input Matrix");
		return -1;
	}
	e = (double*) malloc(sizeof(double) * N);
	ierr = 0;
	eps = macheps();
	/* Householder bidiagonalization: q gets the diagonal, e the
	 * superdiagonal. */
	g = scale = x = 0.0;
	for(i = 0; i < M*N;++i) {
		U[i] = A[i];
	}
	for(i = 0; i < N;++i) {
		l = i+1;
		e[i] = scale * g;
		g = 0.0;
		s = 0.0;
		scale = 0.0;
		if (i < M) {
			/* Householder reflection zeroing column i below the
			 * diagonal. */
			for(k = i; k < M;++k) {
				scale += fabs(U[k*N+i]);
			}
			if (scale != 0.0) {
				for(k = i; k < M;++k) {
					t = k * N;
					U[t+i] /= scale;
					temp = U[t+i];
					s += temp*temp;
				}
				f = U[i*N+i];
				g = (f < 0) ? sqrt(s) : -sqrt(s);
				h = f * g - s;
				U[i*N+i] = f - g;
				if (i < N - 1) {
					/* Apply the reflection to the remaining columns. */
					for(j = l; j < N;++j) {
						s = 0.0;
						for(k = i; k < M;++k) {
							t = k * N;
							s += U[t+i]*U[t+j];
						}
						f = s / h;
						for(k = i; k < M;++k) {
							t = k * N;
							U[t+j] += f * U[t+i];
						}
					}
				}
				for(k = i; k < M;++k) {
					t = k * N;
					U[t+i] *= scale;
				}
			}
		}
		q[i] = scale * g;
		g = 0.0;
		s = 0.0;
		scale = 0.0;
		if (i < M && i != N - 1) {
			/* Householder reflection zeroing row i right of the
			 * superdiagonal. */
			t = i *N;
			for(k = l; k < M;++k) {
				scale += fabs(U[t+k]);
			}
			if (scale != 0.0) {
				for(k = l; k < N;++k) {
					U[t+k] /= scale;
					temp = U[t+k];
					s = s + temp*temp;
				}
				f = U[t+l];
				g = (f < 0) ? sqrt(s) : -sqrt(s);
				h = f * g - s;
				U[t+l] = f - g;
				for(k = l;k < N;++k) {
					e[k] = U[t+k] / h;
				}
				for (j = l; j < M; j++) {
					s = 0.0;
					t2 = j * N;
					for (k = l; k < N; k++) {
						s += U[t2+k] * U[t+k];
					}
					for (k = l; k < N; k++) {
						U[t2+k] += s * e[k];
					}
				}
				for (k = l; k < N; k++)
					U[t+k] *= scale;
			}
		}
		/* Track the largest |q[i]| + |e[i]| for the convergence
		 * threshold below. */
		temp = fabs(q[i]) + fabs(e[i]);
		if (x < temp) {
			x = temp;
		}
	}
	//Accumulating Right Hand Transformations
	for(i = N - 1;i >= 0;--i) {
		t = i * N;
		if (i < N - 1) {
			if (g != 0.0) {
				h = U[t+i+1] * g;
				for(j = l;j < N;++j) {
					V[j*N+i] = U[t+j] / h;
				}
				for(j = l;j < N;++j) {
					s = 0.0;
					for(k = l; k < N;++k) {
						s += U[t+k] * V[k*N+j];
					}
					for(k = l; k < N;++k) {
						V[k*N+j] += (s * V[k*N+i]);
					}
				}
			}
			for(j = l; j < N;++j) {
				V[t+j] = V[j*N+i] = 0.0;
			}
		}
		V[t+i] = 1.0;
		g = e[i];
		l = i;
	}
	//Accumulating Left Hand Transformations
	for(i = N - 1;i >= 0;--i) {
		t = i * N;
		l = i+1;
		g = q[i];
		if (i < N - 1) {
			for(j = l;j < N;++j) {
				U[t+j] = 0.0;
			}
		}
		if (g != 0.0) {
			if (i != N - 1) {
				//h = U[t+i] * g;
				for(j = l;j < N;++j) {
					s = 0.0;
					for(k = l; k < M;++k) {
						s += (U[k*N+i] * U[k*N+j]);
					}
					f = (s / U[t+i]) / g;
					for(k = i; k < M;++k) {
						U[k*N+j] += (f * U[k*N+i]);
					}
				}
			}
			for(j = i; j < M;++j) {
				U[j*N+i] = U[j*N+i] / g;
			}
		} else {
			for(j = i; j < M;++j) {
				U[j*N+i] = 0.0;
			}
		}
		U[t+i] += 1.0;
	}
	// mdisplay(U,M,N);
	/* Diagonalize the bidiagonal form by implicit-shift QR sweeps. */
	eps = eps * x;
	for(k = N - 1; k >= 0; --k) {
		iter = 0;
		while(1) {
			iter++;
			if (iter > SVDMAXITER) {
				printf("Convergence Not Achieved \n");
				free(e); /* fix: do not leak the workspace on failure */
				return 15;
			}
			/* Test for splitting. e[0] is always exactly zero, so this
			 * loop breaks before q[l-1] could be read at l == 0. */
			cancel = 1;
			for(l = k; l >= 0; --l) {
				if (fabs(e[l]) <= eps) {
					cancel = 0; //test f convergence
					break;
				}
				if (fabs(q[l-1]) <= eps) {
					//Cancel
					break;
				}
			}
			if (cancel) {
				/* Cancellation of e[l] if q[l-1] is negligible. */
				c = 0.0;
				s = 1.0;
				l1 = l - 1;
				for(i = l; i <= k;++i) {
					f = s*e[i];
					e[i] *= c;
					if (fabs(f) <= eps) {
						break;
					}
					g = q[i];
					h = q[i] = hypot(f,g);
					c = g/h;
					s = -f/h;
					for(j = 0; j < M;++j) {
						t = j * N;
						y = U[t+l1];
						z = U[t+i];
						U[t+l1] = y * c + z * s;
						U[t+i] = z * c - y * s;
					}
				}
			}
			z = q[k];
			if (l != k) {
				/* Shift from the bottom 2x2 minor. */
				x = q[l];
				y = q[k-1];
				g = e[k-1];
				h = e[k];
				f = 0.5 * (((g + z) / h) * ((g - z) / y) + y / h - h / y);
				g = hypot(f,1.0);
				if (f < 0.0) {
					temp = f - g;
				} else {
					temp = f+g;
				}
				f = x - (z / x) * z + (h / x) * (y / temp - h);
				//Next QR Transformation
				c = s = 1.0;
				for(i = l+1; i <= k;++i) {
					g = e[i];
					y = q[i];
					h = s * g;
					g = c * g;
					e[i-1] = z = hypot(f,h);
					c = f / z;
					s = h / z;
					f = x * c + g * s;
					g = g * c - x * s;
					h = y * s;
					y *= c;
					for(j = 0; j < N;++j) {
						t = j * N;
						x = V[t+i-1];
						z = V[t+i];
						V[t+i-1] = x * c + z * s;
						V[t+i] = z * c - x * s;
					}
					q[i-1] = z = hypot(f,h);
					if (z != 0.0) {
						c = f / z;
						s = h / z;
					}
					f = c * g + s * y;
					x = c * y - s * g;
					for(j = 0; j < M;++j) {
						t = j * N;
						y = U[t+i-1];
						z = U[t+i];
						U[t+i-1] = y * c + z * s;
						U[t+i] = z * c - y * s;
					}
				}
				e[l] = 0.0;
				e[k] = f;
				q[k] = x;
			} else {
				//convergence
				if (z < 0.0) {
					/* Make the singular value non-negative; flip the
					 * corresponding column of V to compensate. */
					q[k] = -z;
					for (j = 0; j < N; j++) {
						t = j *N;
						V[t+k] = -V[t+k];
					}
				}
				break;
			}
		}
	}
	svd_sort(U,M,N,V,q);
	free(e);
	return ierr;
}
/* Numerical rank of the M x N matrix A via SVD: counts singular values
 * above the tolerance max(M,N) * sigma_max * macheps(). Returns -1 if
 * the SVD fails.
 * Fixes vs. previous version: U/V/q were leaked on the SVD failure
 * path, and q[0] was read before the SVD return code was checked
 * (reading potentially uninitialized memory). */
static int rank_c(double *A, int M,int N) {
	int i,rnk,ret;
	double eps,tol,szmax,qmax;
	double *U,*V,*q;
	U = (double*) malloc(sizeof(double) * M*N);
	V = (double*) malloc(sizeof(double) * N*N);
	q = (double*) malloc(sizeof(double) * N);
	eps = macheps();
	rnk = 0;
	/* Tolerance scales with the larger matrix dimension. */
	if (M < N) {
		szmax = (double) N;
	} else {
		szmax = (double) M;
	}
	ret = svd(A,M,N,U,V,q);
	if ( ret != 0) {
		printf("Failed to Compute SVD");
		free(U);
		free(V);
		free(q);
		return -1;
	}
	/* Singular values are sorted by svd_sort, so q[0] is the largest. */
	qmax = q[0];
	tol = qmax*szmax *eps;
	for(i = 0; i < N;++i) {
		if (q[i] > tol) {
			rnk++;
		}
	}
	free(U);
	free(V);
	free(q);
	return rnk;
}
/* Numerical rank of the M x N matrix A. The SVD kernel requires
 * rows >= columns, so a wide matrix is transposed first (rank is
 * transpose-invariant). Returns -1 if the underlying SVD fails.
 * Fix vs. previous version: the transpose buffer is only allocated
 * when it is actually needed (M < N). */
int rank(double *A, int M,int N) {
	int rnk;
	if (M < N) {
		double *AT = (double*) malloc(sizeof(double) * M*N);
		mtranspose(A,M,N,AT);
		rnk = rank_c(AT,N,M);
		free(AT);
	} else {
		rnk = rank_c(A,M,N);
	}
	return rnk;
}
|
producer.c |
#include "grid.h"
#include "config.h"
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <string.h>
#include <omp.h>
#ifndef NO_MPI
#include <mpi.h>
#else
#define MPI_Request int
#define MPI_REQUEST_NULL 0
#endif
/* Per-thread state of one facet-producer stream: identifies the facet
 * worker, tracks the MPI send queue and its buffers, and accumulates
 * timing/volume statistics. One instance per OpenMP thread. */
struct producer_stream {

    // Facet worker id, number of facets to work on
    int facet_worker;
    int facet_work_count;

    // Stream targets: ranks of the subgrid workers to send to
    int streamer_count;
    int *streamer_ranks;

    // Send queue: `send_queue_length` slots; a slot is free when its
    // request is MPI_REQUEST_NULL
    int send_queue_length;
    MPI_Request *requests;
    uint64_t bytes_sent;

    // Private buffers (one NMBF_NMBF-sized buffer per queue slot)
    double complex *NMBF_NMBF_queue;

    // Worker structure (recombination state, FFT plans)
    struct recombine2d_worker worker;

    // Time (in s) spent in different stages
    double mpi_wait_time, mpi_send_time;
};
/* Initialises one per-thread producer stream: records the facet and
 * streamer assignment, allocates the send queue (all slots free) and
 * the per-slot subgrid buffers, sets up the recombination worker, and
 * zeroes the statistics counters. */
void init_producer_stream(struct recombine2d_config *cfg, struct producer_stream *prod,
                          int facet_worker, int facet_work_count,
                          int streamer_count, int *streamer_ranks,
                          int BF_batch, fftw_plan BF_plan,
                          int send_queue_length)
{
    int slot;

    // Facet assignment
    prod->facet_worker = facet_worker;
    prod->facet_work_count = facet_work_count;

    // Stream targets
    prod->streamer_count = streamer_count;
    prod->streamer_ranks = streamer_ranks;

    // Send queue: every slot starts out free (MPI_REQUEST_NULL)
    prod->send_queue_length = send_queue_length;
    prod->requests = (MPI_Request *) malloc(sizeof(MPI_Request) * send_queue_length);
    for (slot = 0; slot < send_queue_length; slot++)
        prod->requests[slot] = MPI_REQUEST_NULL;

    // One NMBF_NMBF-sized buffer per queue slot (NMBF_NMBF_size is in
    // bytes), plus the recombination worker state
    prod->NMBF_NMBF_queue =
        (double complex *)malloc(cfg->NMBF_NMBF_size * send_queue_length);
    recombine2d_init_worker(&prod->worker, cfg, BF_batch, BF_plan, FFTW_MEASURE);

    // Statistics start at zero
    prod->bytes_sent = 0;
    prod->mpi_wait_time = 0;
    prod->mpi_send_time = 0;
}
/* Releases all per-thread resources of a producer stream (send buffers,
 * request queue, and the recombination worker state). */
void free_producer_stream(struct producer_stream *prod)
{
    free(prod->NMBF_NMBF_queue);
    free(prod->requests);
    recombine2d_free_worker(&prod->worker);
}
void producer_add_stats(struct producer_stream *to, struct producer_stream *from)
{
to->bytes_sent += from->bytes_sent;
to->worker.pf1_time += from->worker.pf1_time;
to->worker.es1_time += from->worker.es1_time;
to->worker.ft1_time += from->worker.ft1_time;
to->worker.pf2_time += from->worker.pf2_time;
to->worker.es2_time += from->worker.es2_time;
to->worker.ft2_time += from->worker.ft2_time;
to->mpi_wait_time += from->mpi_wait_time;
to->mpi_send_time += from->mpi_send_time;
}
/* Prints a summary of the producer run: data volume and throughput,
 * time per recombination stage, MPI time, and idle time. `prod` must
 * already hold the folded statistics of all threads (producer_add_stats);
 * `dt` is the wall-clock duration of the run. `facet_worker` is
 * currently unused. */
void producer_dump_stats(struct work_config *wcfg, int facet_worker,
                         struct producer_stream *prod,
                         int producer_count, double dt)
{
    struct recombine2d_config *cfg = &wcfg->recombine;

    // For the "effective" statistic we count the number of bytes we
    // conveyed information about. This statistic is slightly messy
    // because on one hand we have communication overheads (decreasing
    // effectiveness), but on the other hand for generating
    // visibilities do not need to cover the entire grid (increasing
    // effectivenes).
    uint64_t effective = 0;
    int i;
    for (i = 0; i < wcfg->facet_max_work; i++) {
        if (wcfg->facet_work[i].set) {
            effective += cfg->F_size;
        }
    }

    // Stage times are summed over threads, so percentages are taken
    // against total CPU time (threads * wall-clock).
    double total = dt * producer_count;
    printf("\n%.2f s wall-clock, %.2f GB (%.2f GB effective), %.2f MB/s (%.2f MB/s effective)\n", dt,
           (double)prod->bytes_sent / 1000000000, (double)effective / 1000000000,
           (double)prod->bytes_sent / dt / 1000000, (double)effective / dt/ 1000000);
    printf("PF1: %.2f s (%.1f%%), FT1: %.2f s (%.1f%%), ES1: %.2f s (%.1f%%)\n",
           prod->worker.pf1_time, prod->worker.pf1_time / total * 100,
           prod->worker.ft1_time, prod->worker.ft1_time / total * 100,
           prod->worker.es1_time, prod->worker.es1_time / total * 100);
    printf("PF2: %.2f s (%.1f%%), FT2: %.2f s (%.1f%%), ES2: %.2f s (%.1f%%)\n",
           prod->worker.pf2_time, prod->worker.pf2_time / total * 100,
           prod->worker.ft2_time, prod->worker.ft2_time / total * 100,
           prod->worker.es2_time, prod->worker.es2_time / total * 100);
    // Idle time = whatever is not accounted for by any tracked stage
    double idle = total -
        prod->worker.pf1_time - prod->worker.ft1_time - prod->worker.es1_time -
        prod->worker.pf2_time - prod->worker.ft2_time - prod->worker.es2_time -
        prod->mpi_wait_time - prod->mpi_send_time;
    printf("mpi wait: %.2f s (%.1f%%), mpi send: %.2f s (%.1f%%), idle: %.2f s (%.1f%%)\n",
           prod->mpi_wait_time, 100 * prod->mpi_wait_time / producer_count / dt,
           prod->mpi_send_time, 100 * prod->mpi_send_time / producer_count / dt,
           idle, 100 * idle / producer_count / dt);
}
/* Builds a unique MPI message tag from the facet and subgrid work item
 * indices. The worker indices are not encoded: with MPI the sender and
 * receiver ranks already identify the workers. */
int make_subgrid_tag(struct work_config *wcfg,
                     int subgrid_worker_ix, int subgrid_work_ix,
                     int facet_worker_ix, int facet_work_ix) {
    (void) subgrid_worker_ix;
    (void) facet_worker_ix;
    const int tag_base = facet_work_ix * wcfg->subgrid_max_work;
    return tag_base + subgrid_work_ix;
}
/* Extracts the subgrid at (iu, iv) from the column data NMBF_BF and
 * sends it (asynchronously) to every subgrid worker whose work list
 * contains that subgrid. The extraction is done once, into a free slot
 * of the send queue; further recipients get a memcpy of the same data.
 * When the send queue is full, blocks (MPI_Waitany) until a slot frees
 * up. In single-node mode (NO_MPI / no streamer ranks) the send is
 * skipped but statistics are still accumulated. */
void producer_send_subgrid(struct work_config *wcfg, struct producer_stream *prod,
                           int facet_work_ix,
                           double complex *NMBF_BF,
                           int subgrid_off_u, int subgrid_off_v,
                           int iu, int iv)
{
    struct recombine2d_config *cfg = &wcfg->recombine;

    // Extract subgrids along second axis (lazily, on first recipient)
    double complex *NMBF_NMBF = NULL;

    // Find streamer (subgrid workers) to send to
    int iworker;
    for (iworker = 0; iworker < wcfg->subgrid_workers; iworker++) {

        // Check whether it is in streamer's work list. Note that
        // it can appear for multiple workers if the subgrid was
        // split in work assignment (typically at the grid centre).
        struct subgrid_work *work_list = wcfg->subgrid_work +
            iworker * wcfg->subgrid_max_work;
        int iwork;
        for (iwork = 0; iwork < wcfg->subgrid_max_work; iwork++) {
            if (work_list[iwork].nbl && work_list[iwork].iu == iu && work_list[iwork].iv == iv) break;
        }
        if (iwork >= wcfg->subgrid_max_work)
            continue;

        // Select send slot if running in distributed mode
        int indx;
#ifndef NO_MPI
        if (prod->streamer_count == 0)
            indx = 0;
        else {
            // Prefer a slot whose previous send already completed
            for (indx = 0; indx < prod->send_queue_length; indx++) {
                if (prod->requests[indx] == MPI_REQUEST_NULL) break;
            }
            if (indx >= prod->send_queue_length) {
                // Queue full: block until some send finishes
                double start = get_time_ns();
                MPI_Status status;
                MPI_Waitany(prod->send_queue_length, prod->requests, &indx, &status);
                prod->mpi_wait_time += get_time_ns() - start;
            }
            assert (indx >= 0 && indx < prod->send_queue_length);
        }
#else
        indx = 0;
#endif

        // Calculate or copy sub-grid data. The first recipient triggers
        // the actual extraction; later ones reuse the extracted data.
        double complex *send_buf = prod->NMBF_NMBF_queue + indx * cfg->xM_yN_size * cfg->xM_yN_size;
        if (!NMBF_NMBF) {
            NMBF_NMBF = send_buf;
            recombine2d_es0(&prod->worker, subgrid_off_v, subgrid_off_u, NMBF_BF, NMBF_NMBF);
        } else {
            memcpy(send_buf, NMBF_NMBF, cfg->NMBF_NMBF_size);
        }

        // Send (unless running in single-node mode, then we just pretend)
#ifndef NO_MPI
        if (prod->streamer_ranks) {
            int tag = make_subgrid_tag(wcfg, iworker, iwork,
                                       prod->facet_worker, facet_work_ix);
            double start = get_time_ns();
            MPI_Isend(send_buf, cfg->xM_yN_size * cfg->xM_yN_size, MPI_DOUBLE_COMPLEX,
                      prod->streamer_ranks[iworker], tag, MPI_COMM_WORLD, &prod->requests[indx]);
            prod->mpi_send_time += get_time_ns() - start;
        }
#endif
        prod->bytes_sent += sizeof(double complex) * cfg->xM_yN_size * cfg->xM_yN_size;
    }
}
/* Fills one horizontal slab [x0_start, x0_end) of the facet buffer F.
 * Data source, in priority order: raw file (work->path without hdf5),
 * HDF5 dataset, randomly placed point sources, or deterministic
 * pseudo-random noise. Returns true on success, false on I/O failure.
 * Fixes vs. previous version: fd 0 (a valid descriptor) was rejected;
 * the descriptor was leaked on the short-read path; an open() failure
 * was reported but the function still returned true; offset/size were
 * `int` and could overflow for large facets. */
bool producer_fill_facet(struct recombine2d_config *cfg,
                         struct facet_work *work,
                         double complex *F,
                         int source_count,
                         double gridder_x0, double *grid_correction,
                         int x0_start, int x0_end) {

    // Byte offset and extent of the slab within the facet
    off_t offset = (off_t)sizeof(double complex) * x0_start * cfg->yB_size;
    size_t size = sizeof(double complex) * (size_t)(x0_end - x0_start) * cfg->yB_size;

    if (work->path && !work->hdf5) {
        printf("Reading facet data from %s (%d-%d)...\n", work->path, x0_start, x0_end);

        // Make sure strides are compatible
        assert (cfg->F_stride0 == cfg->yB_size && cfg->F_stride1 == 1);

        // Load data from file
        int fd = open(work->path, O_RDONLY);
        if (fd >= 0) {
            lseek(fd, offset, SEEK_SET);
            if (read(fd, F, size) != (ssize_t)size) {
                fprintf(stderr, "failed to read enough data from %s for range %d-%d!\n", work->path, x0_start, x0_end);
                close(fd);
                return false;
            }
            close(fd);
        } else {
            fprintf(stderr, "Failed to read facet data!\n");
            return false;
        }

    } else if (work->path && work->hdf5) {
        printf("Reading facet data from %s:%s (%d-%d)...\n", work->hdf5, work->path, x0_start, x0_end);

        // Make sure strides are as expected, then read
        // TODO: Clearly HDF5 can do partial reads, optimise
        assert (cfg->F_stride0 == cfg->yB_size && cfg->F_stride1 == 1);
        double complex *data = read_hdf5(cfg->F_size, work->hdf5, work->path);

        // Copy the slab out of the full facet
        memcpy(F, data + offset / sizeof(double complex), size);
        free(data);

    } else if (source_count > 0) {

        // Place sources in gridder's usable region. The fixed seed makes
        // every slab/facet draw the same source positions.
        unsigned int seed = 0;
        int image_x0_size = (int)floor(2 * gridder_x0 * cfg->image_size);
        int i;
        for (i = 0; i < source_count; i++) {
            int il = (int)(rand_r(&seed) % image_x0_size) - image_x0_size / 2;
            int im = (int)(rand_r(&seed) % image_x0_size) - image_x0_size / 2;

            // Skip sources outside the current facet (region)
            if (il - work->facet_off_l < -cfg->yB_size/2 ||
                il - work->facet_off_l >= cfg->yB_size/2 ||
                im - work->facet_off_m < -cfg->yB_size/2 ||
                im - work->facet_off_m >= cfg->yB_size/2) {
                continue;
            }

            // Calculate facet coordinates, keeping in mind that the
            // centre is at (0/0).
            int x0 = (im - work->facet_off_m + cfg->yB_size) % cfg->yB_size;
            int x1 = (il - work->facet_off_l + cfg->yB_size) % cfg->yB_size;
            if (x0 < x0_start || x0 >= x0_end) {
                continue;
            }
            // Undo the grid correction so the source has unit flux
            // after gridding
            double c =
                grid_correction[(il + cfg->image_size) % cfg->image_size] *
                grid_correction[(im + cfg->image_size) % cfg->image_size];
            assert(c != 0);
            F[(x0-x0_start)*cfg->F_stride0 + x1*cfg->F_stride1] += 1 / c;
        }

    } else {

        // Fill facet with deterministic pseudo-random numbers (seeded
        // per row so slabs are independent of the chunking)
        int x0, x1;
        for (x0 = x0_start; x0 < x0_end; x0++) {
            unsigned int seed = x0;
            for (x1 = 0; x1 < cfg->yB_size; x1++) {
                F[(x0-x0_start)*cfg->F_stride0+x1*cfg->F_stride1] = (double)rand_r(&seed) / RAND_MAX;
            }
        }

    }

    return true;
}
// Gets subgrid offset for given column/rpw. Returns INT_MIN if no work was found.
static int get_subgrid_off_u(struct work_config *wcfg, int iu)
{
// Somewhat inefficiently walk entire work list
int iwork;
for (iwork = 0; iwork < wcfg->subgrid_workers * wcfg->subgrid_max_work; iwork++) {
if (wcfg->subgrid_work[iwork].nbl > 0 &&
wcfg->subgrid_work[iwork].iu == iu) break;
}
if (iwork >= wcfg->subgrid_workers * wcfg->subgrid_max_work)
return INT_MIN;
return wcfg->subgrid_work[iwork].subgrid_off_u;
}
static int get_subgrid_off_v(struct work_config *wcfg, int iu, int iv)
{
// Somewhat inefficiently walk entire work list
int iwork;
for (iwork = 0; iwork < wcfg->subgrid_workers * wcfg->subgrid_max_work; iwork++) {
if (wcfg->subgrid_work[iwork].nbl > 0 &&
wcfg->subgrid_work[iwork].iu == iu &&
wcfg->subgrid_work[iwork].iv == iv) break;
}
if (iwork >= wcfg->subgrid_workers * wcfg->subgrid_max_work) return INT_MIN;
return wcfg->subgrid_work[iwork].subgrid_off_v;
}
/* Main per-thread work loop, called from inside an OpenMP parallel
 * region: walks the subgrid columns (iu) and rows (iv), recombines
 * facet data into subgrid contributions, and streams them out via
 * producer_send_subgrid. Two parallelisation modes: columns in
 * parallel (each thread owns whole columns) or rows in parallel
 * (threads cooperate on one column at a time via the *_omp kernels). */
static void producer_work(struct work_config *wcfg,
                          struct producer_stream *prod,
                          struct producer_stream *producers,
                          double complex *F, double complex *BF)
{
    int ifacet;

    // Do first stage preparation and Fourier Transform
    if (wcfg->produce_retain_bf)
        for (ifacet = 0; ifacet < prod->facet_work_count; ifacet++)
            recombine2d_pf1_ft1_omp(&prod->worker,
                                    F + ifacet * wcfg->recombine.F_size / sizeof(*F),
                                    BF + ifacet * wcfg->recombine.BF_size / sizeof(*BF));

    // TODO: Generate facet on the fly

    int iu;
    if (wcfg->produce_parallel_cols) {

        // Go through columns in parallel; each thread works on its own
        // columns using its private worker buffers
        #pragma omp for schedule(dynamic)
        for (iu = wcfg->iu_min; iu <= wcfg->iu_max ; iu++) {

            // Determine column offset / check whether column actually has work
            int subgrid_off_u = get_subgrid_off_u(wcfg, iu);
            if (subgrid_off_u == INT_MIN) continue;

            // Loop through facets sequentially (inefficient, as it
            // introduces a time delay on when we touch facets)
            for (ifacet = 0; ifacet < prod->facet_work_count; ifacet++) {

                // Extract subgrids along first axis, then prepare and Fourier
                // transform along second axis
                recombine2d_es1_pf0_ft0(&prod->worker, subgrid_off_u,
                                        BF + ifacet * wcfg->recombine.BF_size / sizeof(*BF),
                                        prod->worker.NMBF_BF);

                // Go through rows in sequence
                int iv;
                for (iv = wcfg->iv_min; iv <= wcfg->iv_max; iv++) {
                    int subgrid_off_v = get_subgrid_off_v(wcfg, iu, iv);
                    if (subgrid_off_v == INT_MIN) continue;
                    producer_send_subgrid(wcfg, prod, ifacet, prod->worker.NMBF_BF,
                                          subgrid_off_u, subgrid_off_v, iu, iv);
                }
            }
        }

    } else {

        // Go through columns in sequence; all threads cooperate on each
        // column via the *_omp kernels. Note the buffers come from
        // producers[0] (producers->worker), i.e. they are shared across
        // threads — presumably intentional for this cooperative mode;
        // verify against the recombine2d_*_omp contracts.
        for (iu = wcfg->iu_min; iu <= wcfg->iu_max; iu++) {

            // Determine column offset / check whether column actually has work
            int subgrid_off_u = get_subgrid_off_u(wcfg, iu);
            if (subgrid_off_u == INT_MIN) continue;

            // Loop through facets (inefficient, see above)
            for (ifacet = 0; ifacet < prod->facet_work_count; ifacet++) {

                // Extract subgrids along first axis, then prepare and Fourier
                // transform along second axis
                double complex *NMBF = producers->worker.NMBF;
                double complex *NMBF_BF = producers->worker.NMBF_BF;
                if (wcfg->produce_retain_bf)
                    recombine2d_es1_omp(&prod->worker, subgrid_off_u,
                                        BF + ifacet * wcfg->recombine.BF_size / sizeof(*BF),
                                        NMBF);
                else
                    recombine2d_pf1_ft1_es1_omp(&prod->worker, subgrid_off_u,
                                                F + ifacet * wcfg->recombine.F_size / sizeof(*F),
                                                NMBF);
                recombine2d_pf0_ft0_omp(&prod->worker, NMBF, NMBF_BF);

                // Go through rows in parallel
                int iv;
                #pragma omp for schedule(dynamic)
                for (iv = wcfg->iv_min; iv <= wcfg->iv_max; iv++) {
                    int subgrid_off_v = get_subgrid_off_v(wcfg, iu, iv);
                    if (subgrid_off_v == INT_MIN) continue;
                    producer_send_subgrid(wcfg, prod, ifacet, NMBF_BF,
                                          subgrid_off_u, subgrid_off_v, iu, iv);
                }
            }
        }
    }
}
/* Facet-producer entry point: fills the facet data, then, inside an
 * OpenMP parallel region, recombines and streams subgrid contributions
 * to the subgrid workers (ranks given by streamer_ranks).
 * Returns 0 on success, 1 on allocation failure.
 * Fixes vs. previous version: BF is always used (at minimum as the FFT
 * planning buffer) so its allocation is now checked unconditionally;
 * the FFTW plan is released with fftw_destroy_plan() instead of
 * fftw_free() (which is only for fftw_malloc'd buffers); the
 * `producers` array itself is no longer leaked. */
int producer(struct work_config *wcfg, int facet_worker, int *streamer_ranks)
{
    struct recombine2d_config *cfg = &wcfg->recombine;
    struct facet_work *fwork = wcfg->facet_work + facet_worker * wcfg->facet_max_work;

    const int BF_batch = wcfg->produce_batch_rows;
    const int send_queue_length = wcfg->produce_queue_length;

    // Get number of facets we need to cover
    int facet_work_count = 0; int ifacet;
    for (ifacet = 0; ifacet < wcfg->facet_max_work; ifacet++)
        if (fwork[ifacet].set)
            facet_work_count++;

    uint64_t F_size = facet_work_count * cfg->F_size;
    uint64_t BF_size = wcfg->produce_retain_bf ?
        facet_work_count * cfg->BF_size :
        sizeof(double complex) * cfg->yP_size * BF_batch;
    printf("Using %.1f GB global, %.1f GB per thread\n",
           (double)(F_size + BF_size) / 1000000000,
           facet_work_count * (double)recombine2d_worker_memory(cfg) / 1000000000);

    // Create global memory buffers. BF is needed in both modes (it is
    // at least the planning buffer for recombine2d_bf_plan below), so
    // check it unconditionally.
    double complex *F = (double complex *)calloc(1, F_size);
    double complex *BF = (double complex *)malloc(BF_size);
    if (!F || !BF) {
        free(F); free(BF);
        printf("Failed to allocate global buffers!\n");
        return 1;
    }

    // Fill facet with random data (TODO: Handle the case that we are
    // meant to cover more than one facet...)
    printf("Filling %d facet%s...\n", facet_work_count, facet_work_count != 1 ? "s" : "");
    double generate_start = get_time_ns();
    int x0; const int x0_chunk = 256;
    for (ifacet = 0; ifacet < facet_work_count; ifacet++) {
        #pragma omp parallel for schedule(dynamic)
        for (x0 = 0; x0 < cfg->yB_size; x0+=x0_chunk) {
            int x0_end = x0 + x0_chunk;
            if (x0_end > cfg->yB_size) x0_end = cfg->yB_size;
            double complex *pF =
                F + ifacet * wcfg->recombine.F_size / sizeof(*F)
                  + x0*cfg->F_stride0;
            producer_fill_facet(cfg, fwork + ifacet, pF,
                                wcfg->produce_source_count,
                                wcfg->gridder_x0, wcfg->grid_correction,
                                x0, x0_end);
        }
    }
    printf(" %.2f s\n", get_time_ns() - generate_start);

    // Debugging (TODO: remove)
    if (false) {
        int x1;
        for (x0 = 0; x0 < cfg->yB_size; x0++) {
            for (x1 = 0; x1 < cfg->yB_size; x1++) {
                printf("%8.2f%+8.2fi\t", creal(F[x0*cfg->F_stride0+x1*cfg->F_stride1]),
                       cimag(F[x0*cfg->F_stride0+x1*cfg->F_stride1]));
            }
            puts("");
        }
    }

    // Global structures (set up once inside the parallel region)
    double run_start;
    int producer_count;
    struct producer_stream *producers;

    #pragma omp parallel
    {
        producer_count = omp_get_num_threads();
        #pragma omp single
        {
            // Do global planning
            printf("Planning for %d threads...\n", producer_count); double planning_start = get_time_ns();
            fftw_plan BF_plan = recombine2d_bf_plan(cfg, BF_batch, BF, FFTW_MEASURE);

            // Create producers (which involves planning, and therefore is not parallelised)
            producers = (struct producer_stream *) malloc(sizeof(struct producer_stream) * producer_count);
            int i;
            for (i = 0; i < producer_count; i++) {
                init_producer_stream(cfg, producers + i, facet_worker, facet_work_count,
                                     wcfg->facet_workers, streamer_ranks,
                                     BF_batch, BF_plan, send_queue_length);
            }

            // End of planning phase
            printf(" %.2f s\n", get_time_ns() - planning_start);
            run_start = get_time_ns();
            printf("Streaming...\n");
        }

        // Do work
        struct producer_stream *prod = producers + omp_get_thread_num();
        producer_work(wcfg, prod, producers, F, BF);

#ifndef NO_MPI
        // Wait for remaining packets to be sent
        double start = get_time_ns();
        MPI_Status statuses[send_queue_length];
        MPI_Waitall(send_queue_length, prod->requests, statuses);
        prod->mpi_wait_time += get_time_ns() - start;
#endif

        free_producer_stream(prod);
    }

    free(BF);
    free(F);
    // The plan is shared by all workers; destroy it once. (fix: was
    // fftw_free, which does not release an fftw_plan correctly)
    fftw_destroy_plan(producers[0].worker.BF_plan);

    // Show statistics
    int p;
    for (p = 1; p < producer_count; p++) {
        producer_add_stats(producers, producers + p);
    }

    producer_dump_stats(wcfg, facet_worker,
                        producers, producer_count,
                        get_time_ns() - run_start);
    free(producers); // fix: array was leaked

    return 0;
}
|
variational_distance_calculation_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Ruben Zorrilla
//
//
#if !defined(KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED )
#define KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "containers/model.h"
#include "includes/kratos_flags.h"
#include "elements/distance_calculation_element_simplex.h"
#include "linear_solvers/linear_solver.h"
#include "processes/process.h"
#include "modeler/connectivity_preserve_modeler.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/strategies/residualbased_linear_strategy.h"
#include "utilities/variable_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/**takes a model part full of SIMPLICIAL ELEMENTS (triangles and tetras) and recomputes a signed distance function
maintaining as much as possible the position of the zero of the function prior to the call.
This is achieved by minimizing the function ( 1 - norm( gradient( distance ) ) )**2
with the restriction that "distance" is a finite element function
*/
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver >
class VariationalDistanceCalculationProcess : public Process
{
public:
KRATOS_DEFINE_LOCAL_FLAG(PERFORM_STEP1);
KRATOS_DEFINE_LOCAL_FLAG(DO_EXPENSIVE_CHECKS);
KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE);
///@name Type Definitions
///@{
typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
typedef typename SchemeType::Pointer SchemePointerType;
typedef typename BuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>::Pointer BuilderSolverPointerType;
typedef SolvingStrategy< TSparseSpace, TDenseSpace, TLinearSolver > SolvingStrategyType;
///@}
///@name Pointer Definitions
/// Pointer definition of VariationalDistanceCalculationProcess
KRATOS_CLASS_POINTER_DEFINITION(VariationalDistanceCalculationProcess);
///@}
///@name Life Cycle
///@{
/**This process recomputed the distance function mantaining the zero of the existing distance distribution
* for this reason the DISTANCE should be initialized to values distinct from zero in at least some portions of the domain
* alternatively, the DISTANCE shall be fixed to zero at least on some nodes, and the process will compute a positive distance
* respecting that zero
* @param base_model_parr - is the model part on the top of which the calculation will be performed
* @param plinear_solver - linear solver to be used internally
* @max_iterations - maximum number of iteration to be employed in the nonlinear optimization process.
* - can also be set to 0 if a (very) rough approximation is enough
*
* EXAMPLE OF USAGE FROM PYTHON:
*
class distance_linear_solver_settings:
solver_type = "AMGCL"
tolerance = 1E-3
max_iteration = 200
scaling = False
krylov_type = "CG"
smoother_type = "SPAI0"
verbosity = 0
import linear_solver_factory
distance_linear_solver = linear_solver_factory.ConstructSolver(distance_linear_solver_settings)
max_iterations=1
distance_calculator = VariationalDistanceCalculationProcess2D(fluid_model_part, distance_linear_solver, max_iterations)
distance_calculator.Execute()
*/
VariationalDistanceCalculationProcess(
ModelPart& rBaseModelPart,
typename TLinearSolver::Pointer pLinearSolver,
unsigned int MaxIterations = 10,
Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
std::string AuxPartName = "RedistanceCalculationPart" )
:
mDistancePartIsInitialized(false),
mMaxIterations(MaxIterations),
mrModel( rBaseModelPart.GetModel() ),
mrBaseModelPart (rBaseModelPart),
mOptions( Options ),
mAuxModelPartName( AuxPartName )
{
KRATOS_TRY
ValidateInput();
// Generate an auxilary model part and populate it by elements of type DistanceCalculationElementSimplex
ReGenerateDistanceModelPart(rBaseModelPart);
auto p_builder_solver = Kratos::make_shared<ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> >(pLinearSolver);
InitializeSolutionStrategy(p_builder_solver);
KRATOS_CATCH("")
}
/// Constructor with custom Builder And Solver
/** To be used in the trilinos version, since the trilinos builder and
* solver needs additional data (the EpetraComm).
* @param rBaseModelPart Reference ModelPart for distance calculation.
* @param pLinearSolver Linear solver for the distance system.
* @param MaxIterations Maximum number of non-linear optimization iterations.
* @param Options Configuration flags for the procedure.
* @param AuxPartName Name to be used for the internal distance calculation ModelPart.
*/
VariationalDistanceCalculationProcess(
ModelPart& rBaseModelPart,
typename TLinearSolver::Pointer pLinearSolver,
BuilderSolverPointerType pBuilderAndSolver,
unsigned int MaxIterations = 10,
Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
std::string AuxPartName = "RedistanceCalculationPart" )
:
mDistancePartIsInitialized(false),
mMaxIterations(MaxIterations),
mrModel( rBaseModelPart.GetModel() ),
mrBaseModelPart (rBaseModelPart),
mOptions( Options ),
mAuxModelPartName( AuxPartName )
{
KRATOS_TRY
ValidateInput();
// Generate an auxilary model part and populate it by elements of type DistanceCalculationElementSimplex
ReGenerateDistanceModelPart(rBaseModelPart);
InitializeSolutionStrategy(pBuilderAndSolver);
KRATOS_CATCH("")
}
/// Destructor.
~VariationalDistanceCalculationProcess() override
{
Clear();
};
///@}
///@name Operators
///@{
void operator()()
{
Execute();
}
///@}
///@name Operations
///@{
void Execute() override
{
KRATOS_TRY;
if(mDistancePartIsInitialized == false){
ReGenerateDistanceModelPart(mrBaseModelPart);
}
ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
// TODO: check flag PERFORM_STEP1
// Step1 - solve a poisson problem with a source term which depends on the sign of the existing distance function
r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,1);
// Unfix the distances
const int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
double& d = it_node->FastGetSolutionStepValue(DISTANCE);
double& fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE);
// Free the DISTANCE values
fix_flag = 1.0;
it_node->Free(DISTANCE);
// Save the distances
it_node->SetValue(DISTANCE, d);
if(d == 0){
d = 1.0e-15;
fix_flag = -1.0;
it_node->Fix(DISTANCE);
} else {
if(d > 0.0){
d = 1.0e15; // Set to a large number, to make sure that that the minimal distance is computed according to CaculateTetrahedraDistances
} else {
d = -1.0e15;
}
}
}
const int nelem = static_cast<int>(r_distance_model_part.NumberOfElements());
#pragma omp parallel for
for(int i_elem = 0; i_elem < nelem; ++i_elem){
auto it_elem = r_distance_model_part.ElementsBegin() + i_elem;
array_1d<double,TDim+1> distances;
auto& geom = it_elem->GetGeometry();
for(unsigned int i=0; i<TDim+1; i++){
distances[i] = geom[i].GetValue(DISTANCE);
}
const array_1d<double,TDim+1> original_distances = distances;
// The element is cut by the interface
if(this->IsSplit(distances)){
// Compute the unsigned distance using GeometryUtils
if (mOptions.Is(CALCULATE_EXACT_DISTANCES_TO_PLANE)) {
GeometryUtils::CalculateExactDistancesToPlane(geom, distances);
}
else {
if(TDim==3){
GeometryUtils::CalculateTetrahedraDistances(geom, distances);
}
else {
GeometryUtils::CalculateTriangleDistances(geom, distances);
}
}
// Assign the sign using the original distance values
for(unsigned int i = 0; i < TDim+1; ++i){
if(original_distances[i] < 0){
distances[i] = -distances[i];
}
}
for(unsigned int i = 0; i < TDim+1; ++i){
double &d = geom[i].FastGetSolutionStepValue(DISTANCE);
double &fix_flag = geom[i].FastGetSolutionStepValue(FLAG_VARIABLE);
geom[i].SetLock();
if(std::abs(d) > std::abs(distances[i])){
d = distances[i];
}
fix_flag = -1.0;
geom[i].Fix(DISTANCE);
geom[i].UnSetLock();
}
}
}
// SHALL WE SYNCHRONIZE SOMETHING IN HERE?¿?¿??¿ WE'VE CHANGED THE NODAL DISTANCE VALUES FROM THE ELEMENTS...
this->SynchronizeFixity();
this->SynchronizeDistance();
// Compute the maximum and minimum distance for the fixed nodes
double max_dist = 0.0;
double min_dist = 0.0;
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
if(it_node->IsFixed(DISTANCE)){
const double& d = it_node->FastGetSolutionStepValue(DISTANCE);
if(d > max_dist){
max_dist = d;
}
if(d < min_dist){
min_dist = d;
}
}
}
// Synchronize the maximum and minimum distance values
const auto &r_communicator = r_distance_model_part.GetCommunicator().GetDataCommunicator();
max_dist = r_communicator.MaxAll(max_dist);
min_dist = r_communicator.MinAll(min_dist);
// Assign the max dist to all of the non-fixed positive nodes
// and the minimum one to the non-fixed negatives
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
if(!it_node->IsFixed(DISTANCE)){
double& d = it_node->FastGetSolutionStepValue(DISTANCE);
if(d>0){
d = max_dist;
} else {
d = min_dist;
}
}
}
mpSolvingStrategy->Solve();
// Step2 - minimize the target residual
r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,2);
for(unsigned int it = 0; it<mMaxIterations; it++){
mpSolvingStrategy->Solve();
}
// Unfix the distances
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = (r_distance_model_part.NodesBegin()) + i_node;
it_node->Free(DISTANCE);
}
KRATOS_CATCH("")
}
void Clear() override
{
if(mrModel.HasModelPart( mAuxModelPartName ))
mrModel.DeleteModelPart( mAuxModelPartName );
mDistancePartIsInitialized = false;
mpSolvingStrategy->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "VariationalDistanceCalculationProcess";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "VariationalDistanceCalculationProcess";
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
bool mDistancePartIsInitialized;
unsigned int mMaxIterations;
Model& mrModel;
ModelPart& mrBaseModelPart;
Flags mOptions;
std::string mAuxModelPartName;
typename SolvingStrategyType::UniquePointer mpSolvingStrategy;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
void ValidateInput()
{
const DataCommunicator& r_comm = mrBaseModelPart.GetCommunicator().GetDataCommunicator();
int num_elements = mrBaseModelPart.NumberOfElements();
int num_nodes = mrBaseModelPart.NumberOfNodes();
if (num_elements > 0)
{
const auto geometry_family = mrBaseModelPart.ElementsBegin()->GetGeometry().GetGeometryFamily();
KRATOS_ERROR_IF( (TDim == 2) && (geometry_family != GeometryData::Kratos_Triangle) )
<< "In 2D the element type is expected to be a triangle." << std::endl;
KRATOS_ERROR_IF( (TDim == 3) && (geometry_family != GeometryData::Kratos_Tetrahedra) )
<< "In 3D the element type is expected to be a tetrahedron" << std::endl;
}
KRATOS_ERROR_IF(r_comm.SumAll(num_nodes) == 0) << "The model part has no nodes." << std::endl;
KRATOS_ERROR_IF(r_comm.SumAll(num_elements) == 0) << "The model Part has no elements." << std::endl;
// Check that required nodal variables are present
VariableUtils().CheckVariableExists<Variable<double > >(DISTANCE, mrBaseModelPart.Nodes());
VariableUtils().CheckVariableExists<Variable<double > >(FLAG_VARIABLE, mrBaseModelPart.Nodes());
}
void InitializeSolutionStrategy(BuilderSolverPointerType pBuilderAndSolver)
{
// Generate a linear strategy
auto p_scheme = Kratos::make_shared< ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace,TDenseSpace > >();
ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
bool CalculateReactions = false;
bool ReformDofAtEachIteration = false;
bool CalculateNormDxFlag = false;
mpSolvingStrategy = Kratos::make_unique<ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver> >(
r_distance_model_part,
p_scheme,
pBuilderAndSolver,
CalculateReactions,
ReformDofAtEachIteration,
CalculateNormDxFlag);
// TODO: check flag DO_EXPENSIVE_CHECKS
mpSolvingStrategy->Check();
}
virtual void ReGenerateDistanceModelPart(ModelPart& rBaseModelPart)
{
KRATOS_TRY
if(mrModel.HasModelPart( mAuxModelPartName ))
mrModel.DeleteModelPart( mAuxModelPartName );
// Ensure that the nodes have distance as a DOF
VariableUtils().AddDof<Variable<double> >(DISTANCE, rBaseModelPart);
// Generate
ModelPart& r_distance_model_part = mrModel.CreateModelPart( mAuxModelPartName );
Element::Pointer p_distance_element = Kratos::make_intrusive<DistanceCalculationElementSimplex<TDim> >();
r_distance_model_part.GetNodalSolutionStepVariablesList() = rBaseModelPart.GetNodalSolutionStepVariablesList();
ConnectivityPreserveModeler modeler;
modeler.GenerateModelPart(rBaseModelPart, r_distance_model_part, *p_distance_element);
// Using the conditions to mark the boundary with the flag boundary
// Note that we DO NOT add the conditions to the model part
VariableUtils().SetFlag<ModelPart::NodesContainerType>(BOUNDARY, false, r_distance_model_part.Nodes());
// Note that above we have assigned the same geometry. Thus the flag is
// set in the distance model part despite we are iterating the base one
for (auto it_cond = rBaseModelPart.ConditionsBegin(); it_cond != rBaseModelPart.ConditionsEnd(); ++it_cond){
Geometry< Node<3> >& geom = it_cond->GetGeometry();
for(unsigned int i=0; i<geom.size(); i++){
geom[i].Set(BOUNDARY,true);
}
}
rBaseModelPart.GetCommunicator().SynchronizeOrNodalFlags(BOUNDARY);
mDistancePartIsInitialized = true;
KRATOS_CATCH("")
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
bool IsSplit(const array_1d<double,TDim+1> &rDistances){
unsigned int positives = 0, negatives = 0;
for(unsigned int i = 0; i < TDim+1; ++i){
if(rDistances[i] >= 0){
++positives;
} else {
++negatives;
}
}
if (positives > 0 && negatives > 0){
return true;
}
return false;
}
void SynchronizeDistance(){
ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
auto &r_communicator = r_distance_model_part.GetCommunicator();
// Only required in the MPI case
if(r_communicator.TotalProcesses() != 1){
int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());
// Set the distance absolute value
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
it_node->FastGetSolutionStepValue(DISTANCE) = std::abs(it_node->FastGetSolutionStepValue(DISTANCE));
}
// Synchronize the unsigned value to minimum
r_communicator.SynchronizeCurrentDataToMin(DISTANCE);
// Set the distance sign again by retrieving it from the non-historical database
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
if(it_node->GetValue(DISTANCE) < 0.0){
it_node->FastGetSolutionStepValue(DISTANCE) = -it_node->FastGetSolutionStepValue(DISTANCE);
}
}
}
}
void SynchronizeFixity(){
ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
auto &r_communicator = r_distance_model_part.GetCommunicator();
// Only required in the MPI case
if(r_communicator.TotalProcesses() != 1){
int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());
// Synchronize the fixity flag variable to minium
// (-1.0 means fixed and 1.0 means free)
r_communicator.SynchronizeCurrentDataToMin(FLAG_VARIABLE);
// Set the fixity according to the synchronized flag
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
const double &r_fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE);
if (r_fix_flag == -1.0){
it_node->Fix(DISTANCE);
}
}
}
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
VariationalDistanceCalculationProcess& operator=(VariationalDistanceCalculationProcess const& rOther);
/// Copy constructor.
//VariationalDistanceCalculationProcess(VariationalDistanceCalculationProcess const& rOther);
///@}
}; // Class VariationalDistanceCalculationProcess
//avoiding using the macro since this has a template parameter. If there was no template plase use the KRATOS_CREATE_LOCAL_FLAG macro
// Flag storage: bit 0 = PERFORM_STEP1, bit 1 = DO_EXPENSIVE_CHECKS, bit 2 = CALCULATE_EXACT_DISTANCES_TO_PLANE.
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::PERFORM_STEP1(Kratos::Flags::Create(0));
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::DO_EXPENSIVE_CHECKS(Kratos::Flags::Create(1));
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(2));
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::istream& operator >> (std::istream& rIStream,
VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis);
/// output stream function
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::ostream& operator << (std::ostream& rOStream,
                  const VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis)
{
    // Emit the info header, a newline separator, and then the object's data.
    rThis.PrintInfo(rOStream);
    rThis.PrintData(rOStream << std::endl);
    return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED defined
|
diag.c | /*---------------------------------------------------------------------------------
DIAG.C
-Diagnostic output. Also used for writing dumps
-Computes mass, magnetic, angular momentum and energy flux at first physical
zone and at EH
-Computes total mass, angular momentum and energy
-Computes divB
-Computes pseudo-emissivity for thermal synchrotron radiation and luminosity
-Write out diagnostics to dumps/log.out
---------------------------------------------------------------------------------*/
#include "decs.h"
// Evaluate flux based diagnostics; put results in global variables
// Note this is still per-process
void diag_flux(struct FluidFlux *F)
{
  // Accumulate fluxes of mass (mdot), energy (edot) and angular momentum
  // (ldot) through the first physical zone (radial index NG) and through the
  // event-horizon zone (iEH). Results go in global variables; per the file
  // header this is still per-process (no MPI reduction here).
  mdot = edot = ldot = 0.;
  mdot_eh = edot_eh = ldot_eh = 0.;
  int iEH = NG + 5; // NOTE(review): horizon assumed 5 zones inside the physical grid -- confirm against grid setup
#if !INTEL_WORKAROUND
#pragma omp parallel for \
  reduction(+:mdot) reduction(+:edot) reduction(+:ldot) \
  reduction(+:mdot_eh) reduction(+:edot_eh) reduction(+:ldot_eh)
#endif
  JSLOOP(0, N2 - 1)
  {
    // Minus sign: inward mass flux counted as positive accretion rate
    mdot += -F->X1[RHO][j][NG]*dx[2];
    // Energy flux combines the UU and RHO fluxes (rest-mass subtracted)
    edot += (F->X1[UU][j][NG] - F->X1[RHO][j][NG])*dx[2];
    ldot += F->X1[U3][j][NG]*dx[2];
    // Same three quantities evaluated at the horizon index
    mdot_eh += -F->X1[RHO][j][iEH]*dx[2];
    edot_eh += (F->X1[UU][j][iEH] - F->X1[RHO][j][iEH])*dx[2];
    ldot_eh += F->X1[U3][j][iEH]*dx[2];
  }
}
// Main diagnostic driver. Depending on call_code it opens the log file
// (DIAG_INIT), computes conserved totals and divB (INIT/LOG/FINAL), writes
// dump files (INIT/DUMP/FINAL/ABORT), and appends a line to dumps/log.out.
void diag(struct GridGeom *G, struct FluidState *S, int call_code)
{
  // Log file handle kept open across calls
  static FILE *ener_file;
  if (call_code == DIAG_INIT)
  {
    // Set things up
    ener_file = fopen("dumps/log.out", "a");
    if (ener_file == NULL)
    {
      fprintf(stderr, "Error opening log file!\n");
      exit(1);
    }
  }
  double pp = 0.;       // volume integral of the U3 conserved variable
  double divbmax = 0.;  // maximum |div B| over the grid interior
  int imax = 0; int jmax = 0; // location of divbmax
  double rmed = 0.;     // volume integral of conserved RHO
  double e = 0.;        // volume integral of conserved UU
  // Calculate conserved quantities
  if (call_code == DIAG_INIT || call_code == DIAG_LOG || call_code == DIAG_FINAL)
  {
    get_state_vec(G, S, CENT, 0, N2 - 1, 0, N1 - 1);
    prim_to_flux_vec(G, S, 0, CENT, 0, N2 - 1, 0, N1 - 1, S->U);
#if !INTEL_WORKAROUND
#pragma omp parallel for \
  reduction(+:rmed) reduction(+:pp) reduction(+:e) \
  reduction(max:divbmax) collapse(2)
#endif
    ZLOOP
    {
      rmed += S->U[RHO][j][i]*dV;
      pp += S->U[U3][j][i]*dV;
      e += S->U[UU][j][i]*dV;
      // divB uses a corner stencil needing i-1, j-1: skip the first interior row/column.
      // NOTE(review): imax/jmax are written inside a reduction(max:divbmax) region and
      // may not correspond to the final divbmax when run multithreaded -- benign for logging.
      if (i > 0+NG && j > 0+NG)
      {
        double divb = flux_ct_divb(G, S, i, j);
        if (divb > divbmax)
        {
          divbmax = divb;
          imax = i;
          jmax = j;
        }
      }
    }
  }
  double mass = 0.;        // total conserved mass
  double egas = 0.;        // total conserved energy
  double Phi = 0.;         // magnetic flux through the i == 5+NG shell (horizon proxy)
  double jet_EM_flux = 0.; // accumulated nowhere below; logged as 0 -- TODO confirm intended
  double lum_eht = 0.;     // pseudo-luminosity from the thermal synchrotron emissivity fit
#if !INTEL_WORKAROUND
#pragma omp parallel for reduction(+:mass) reduction(+:egas) reduction(+:Phi) reduction(+:jet_EM_flux) reduction(+:lum_eht) collapse(2)
#endif
  ZLOOP
  {
    mass += S->U[RHO][j][i]*dV;
    egas += S->U[UU][j][i]*dV;
    double rho = S->P[RHO][j][i];
    double Pg = (gam - 1.)*S->P[UU][j][i]; // ideal-gas pressure
    double bsq = bsq_calc(S, i, j);
    double Bmag = sqrt(bsq);
    // Pseudo-emissivity for thermal synchrotron radiation (see file header);
    // C_eht is the fit constant of the exponential cutoff
    double C_eht = 0.2;
    double j_eht = pow(rho,3.)*pow(Pg,-2.)*exp(-C_eht*pow(rho*rho/(Bmag*Pg*Pg),1./3.));
    lum_eht += j_eht*dV*G->gdet[CENT][j][i];
    // Magnetic flux: 0.5 * integral of |B^1| over the shell (Gaussian-unit factor sqrt(4 pi))
    if (i == 5+NG)
      Phi += 0.5*fabs(sqrt(4*M_PI)*S->P[B1][j][i])*dx[2]*G->gdet[CENT][j][i];
  }
  if ((call_code == DIAG_INIT && !is_restart) || call_code == DIAG_DUMP || call_code == DIAG_FINAL)
  {
    dump(G, S);
    dump_cnt++;
  }
  if (call_code == DIAG_ABORT)
  {
    dump_backend(G, S, IO_ABORT);
  }
  if (call_code == DIAG_INIT || call_code == DIAG_LOG || call_code == DIAG_FINAL)
  {
    //mdot will be negative w/scheme above
    double phi = Phi/sqrt(fabs(mdot) + SMALL); // normalized magnetic flux; SMALL guards division by zero
    fprintf(stdout, "LOG t=%g \t divbmax: %g at %d %d\n", t, divbmax, imax, jmax);
    // Log line: time, totals, central entropy proxy and internal energy, fluxes, ...
    fprintf(ener_file, "%10.5g %10.5g %10.5g %10.5g %15.8g %15.8g ", t, rmed, pp, e, S->P[UU][N2/2][N1/2]*pow(S->P[RHO][N2/2][N1/2], -gam), S->P[UU][N2/2][N1/2]);
    fprintf(ener_file, "%15.8g %15.8g %15.8g ", mdot, edot, ldot);
    fprintf(ener_file, "%15.8g %15.8g ", mass, egas);
    fprintf(ener_file, "%15.8g %15.8g %15.8g ", Phi, phi, jet_EM_flux);
    fprintf(ener_file, "%15.8g ", divbmax);
    fprintf(ener_file, "%15.8g ", lum_eht);
    fprintf(ener_file, "%15.8g %15.8g %15.8g ", mdot_eh, edot_eh, ldot_eh);
    fprintf(ener_file, "\n");
    fflush(ener_file);
  }
}
// Calculate divB
// Corner-centered |div B| at zone corner (i, j), evaluated with the
// flux-CT stencil: density-weighted B components averaged over the four
// surrounding zone centers and differenced in each direction.
double flux_ct_divb(struct GridGeom *G, struct FluidState *S, int i, int j)
{
  double db1 = 0.5*(S->P[B1][j][i]*G->gdet[CENT][j][i] + S->P[B1][j-1][i]*G->gdet[CENT][j-1][i]
    - S->P[B1][j][i-1]*G->gdet[CENT][j][i-1] - S->P[B1][j-1][i-1]*G->gdet[CENT][j-1][i-1])/dx[1];
  double db2 = 0.5*(S->P[B2][j][i]*G->gdet[CENT][j][i] + S->P[B2][j][i-1]*G->gdet[CENT][j][i-1]
    - S->P[B2][j-1][i]*G->gdet[CENT][j-1][i] - S->P[B2][j-1][i-1]*G->gdet[CENT][j-1][i-1])/dx[2];
  return fabs(db1 + db2);
}
|
ast-dump-openmp-begin-declare-variant_9.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX
// expected-no-diagnostics
int also_before(void) { // base definition; per the CHECK lines below, an LLVM-vendor variant is attached to it
  return 0;
}
#pragma omp begin declare variant match(implementation={vendor(llvm)}) // variants below apply only when the implementation vendor is llvm
int also_after(void) { // variant body for also_after (declared implicitly here, defined as base at line 18)
  return 1;
}
int also_before(void) { // variant body for also_before (base defined at line 5)
  return 2;
}
#pragma omp end declare variant
int also_after(void) { // base definition; inherits the variant attribute from the implicit declaration above
  return 0;
}
void foo();
typedef int(*fd)(void);
int main() {
  // Should return 0.
  fd fns[2];
  fns[0] = &also_before; // per the CHECK lines, this references the base also_before, not the variant
  fns[1] = also_after;   // implicit function-to-pointer decay; also the base definition
  return (foo(), also_after)() + // comma expression yields also_after, then it is called
         (fns[0])() +
         (1[fns])(); // 1[fns] is identical to fns[1] by commutativity of []
}
// Make sure:
// - we see the specialization in the AST
// - we pick the right callees
// C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// C-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// C-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// C-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2
// C-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, col:10> col:6 used foo 'void ({{.*}})'
// C-NEXT: |-TypedefDecl [[ADDR_23:0x[a-z0-9]*]] <line:23:1, col:22> col:14 referenced fd 'int (*)({{.*}})'
// C-NEXT: | `-PointerType [[ADDR_24:0x[a-z0-9]*]] 'int (*)({{.*}})'
// C-NEXT: | `-ParenType [[ADDR_25:0x[a-z0-9]*]] 'int ({{.*}})' sugar
// C-NEXT: | `-FunctionProtoType [[ADDR_26:0x[a-z0-9]*]] 'int ({{.*}})' cdecl
// C-NEXT: | `-BuiltinType [[ADDR_27:0x[a-z0-9]*]] 'int'
// C-NEXT: `-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] <line:24:1, line:32:1> line:24:5 main 'int ({{.*}})'
// C-NEXT: `-CompoundStmt [[ADDR_29:0x[a-z0-9]*]] <col:12, line:32:1>
// C-NEXT: |-DeclStmt [[ADDR_30:0x[a-z0-9]*]] <line:26:3, col:12>
// C-NEXT: | `-VarDecl [[ADDR_31:0x[a-z0-9]*]] <col:3, col:11> col:6 used fns 'fd[2]'
// C-NEXT: |-BinaryOperator [[ADDR_32:0x[a-z0-9]*]] <line:27:3, col:13> 'fd':'int (*)({{.*}})' '='
// C-NEXT: | |-ArraySubscriptExpr [[ADDR_33:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | | |-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_35:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// C-NEXT: | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:7> 'int' 0
// C-NEXT: | `-UnaryOperator [[ADDR_37:0x[a-z0-9]*]] <col:12, col:13> 'int (*)({{.*}})' prefix '&' cannot overflow
// C-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:13> 'int ({{.*}})' Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// C-NEXT: |-BinaryOperator [[ADDR_39:0x[a-z0-9]*]] <line:28:3, col:12> 'fd':'int (*)({{.*}})' '='
// C-NEXT: | |-ArraySubscriptExpr [[ADDR_40:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | | |-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_42:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// C-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:7> 'int' 1
// C-NEXT: | `-ImplicitCastExpr [[ADDR_44:0x[a-z0-9]*]] <col:12> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_45:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// C-NEXT: `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:29:3, line:31:19>
// C-NEXT: `-BinaryOperator [[ADDR_47:0x[a-z0-9]*]] <line:29:10, line:31:19> 'int' '+'
// C-NEXT: |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <line:29:10, line:30:19> 'int' '+'
// C-NEXT: | |-CallExpr [[ADDR_49:0x[a-z0-9]*]] <line:29:10, col:30> 'int'
// C-NEXT: | | `-ParenExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:28> 'int (*)({{.*}})'
// C-NEXT: | | `-BinaryOperator [[ADDR_51:0x[a-z0-9]*]] <col:11, col:18> 'int (*)({{.*}})' ','
// C-NEXT: | | |-CallExpr [[ADDR_52:0x[a-z0-9]*]] <col:11, col:15> 'void'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_53:0x[a-z0-9]*]] <col:11> 'void (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_54:0x[a-z0-9]*]] <col:11> 'void ({{.*}})' Function [[ADDR_22]] 'foo' 'void ({{.*}})'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_55:0x[a-z0-9]*]] <col:18> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_56:0x[a-z0-9]*]] <col:18> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// C-NEXT: | `-CallExpr [[ADDR_57:0x[a-z0-9]*]] <line:30:10, col:19> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// C-NEXT: | `-ParenExpr [[ADDR_59:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | `-ArraySubscriptExpr [[ADDR_60:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | |-ImplicitCastExpr [[ADDR_61:0x[a-z0-9]*]] <col:11> 'fd *' <ArrayToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_62:0x[a-z0-9]*]] <col:11> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// C-NEXT: | `-IntegerLiteral [[ADDR_63:0x[a-z0-9]*]] <col:15> 'int' 0
// C-NEXT: `-CallExpr [[ADDR_64:0x[a-z0-9]*]] <line:31:10, col:19> 'int'
// C-NEXT: `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// C-NEXT: `-ParenExpr [[ADDR_66:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: `-ArraySubscriptExpr [[ADDR_67:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: |-IntegerLiteral [[ADDR_68:0x[a-z0-9]*]] <col:11> 'int' 1
// C-NEXT: `-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <col:13> 'fd *' <ArrayToPointerDecay>
// C-NEXT: `-DeclRefExpr [[ADDR_70:0x[a-z0-9]*]] <col:13> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// CXX-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2
// CXX-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, col:10> col:6 used foo 'void ({{.*}})'
// CXX-NEXT: |-TypedefDecl [[ADDR_23:0x[a-z0-9]*]] <line:23:1, col:22> col:14 referenced fd 'int (*)({{.*}})'
// CXX-NEXT: | `-PointerType [[ADDR_24:0x[a-z0-9]*]] 'int (*)({{.*}})'
// CXX-NEXT: | `-ParenType [[ADDR_25:0x[a-z0-9]*]] 'int ({{.*}})' sugar
// CXX-NEXT: | `-FunctionProtoType [[ADDR_26:0x[a-z0-9]*]] 'int ({{.*}})' cdecl
// CXX-NEXT: | `-BuiltinType [[ADDR_27:0x[a-z0-9]*]] 'int'
// CXX-NEXT: `-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] <line:24:1, line:32:1> line:24:5 main 'int ({{.*}})'
// CXX-NEXT: `-CompoundStmt [[ADDR_29:0x[a-z0-9]*]] <col:12, line:32:1>
// CXX-NEXT: |-DeclStmt [[ADDR_30:0x[a-z0-9]*]] <line:26:3, col:12>
// CXX-NEXT: | `-VarDecl [[ADDR_31:0x[a-z0-9]*]] <col:3, col:11> col:6 used fns 'fd[2]'
// CXX-NEXT: |-BinaryOperator [[ADDR_32:0x[a-z0-9]*]] <line:27:3, col:13> 'fd':'int (*)({{.*}})' {{.*}}'='
// CXX-NEXT: | |-ArraySubscriptExpr [[ADDR_33:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_35:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:7> 'int' 0
// CXX-NEXT: | `-UnaryOperator [[ADDR_37:0x[a-z0-9]*]] <col:12, col:13> 'int (*)({{.*}})' prefix '&' cannot overflow
// CXX-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:13> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CXX-NEXT: |-BinaryOperator [[ADDR_39:0x[a-z0-9]*]] <line:28:3, col:12> 'fd':'int (*)({{.*}})' {{.*}}'='
// CXX-NEXT: | |-ArraySubscriptExpr [[ADDR_40:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_42:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:7> 'int' 1
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_44:0x[a-z0-9]*]] <col:12> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_45:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CXX-NEXT: `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:29:3, line:31:19>
// CXX-NEXT: `-BinaryOperator [[ADDR_47:0x[a-z0-9]*]] <line:29:10, line:31:19> 'int' '+'
// CXX-NEXT: |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <line:29:10, line:30:19> 'int' '+'
// CXX-NEXT: | |-CallExpr [[ADDR_49:0x[a-z0-9]*]] <line:29:10, col:30> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:28> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-ParenExpr [[ADDR_51:0x[a-z0-9]*]] <col:10, col:28> 'int ({{.*}})' lvalue
// CXX-NEXT: | | `-BinaryOperator [[ADDR_52:0x[a-z0-9]*]] <col:11, col:18> 'int ({{.*}})' {{.*}}','
// CXX-NEXT: | | |-CallExpr [[ADDR_53:0x[a-z0-9]*]] <col:11, col:15> 'void'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_54:0x[a-z0-9]*]] <col:11> 'void (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_55:0x[a-z0-9]*]] <col:11> 'void ({{.*}})' {{.*}}Function [[ADDR_22]] 'foo' 'void ({{.*}})'
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_56:0x[a-z0-9]*]] <col:18> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CXX-NEXT: | `-CallExpr [[ADDR_57:0x[a-z0-9]*]] <line:30:10, col:19> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// CXX-NEXT: | `-ParenExpr [[ADDR_59:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | `-ArraySubscriptExpr [[ADDR_60:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | |-ImplicitCastExpr [[ADDR_61:0x[a-z0-9]*]] <col:11> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_62:0x[a-z0-9]*]] <col:11> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX-NEXT: | `-IntegerLiteral [[ADDR_63:0x[a-z0-9]*]] <col:15> 'int' 0
// CXX-NEXT: `-CallExpr [[ADDR_64:0x[a-z0-9]*]] <line:31:10, col:19> 'int'
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// CXX-NEXT: `-ParenExpr [[ADDR_66:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: `-ArraySubscriptExpr [[ADDR_67:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: |-IntegerLiteral [[ADDR_68:0x[a-z0-9]*]] <col:11> 'int' 1
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <col:13> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: `-DeclRefExpr [[ADDR_70:0x[a-z0-9]*]] <col:13> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
|
CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// Hook interface allowing a codegen sequence to run custom work right
/// before (Enter) and right after (Exit) an OpenMP region is emitted.
/// The default implementations are no-ops; subclasses override as needed.
class PrePostActionTy {
public:
  explicit PrePostActionTy() = default;
  /// Called before the region body is generated.
  virtual void Enter(CodeGenFunction &CGF) {}
  /// Called after the region body is generated.
  virtual void Exit(CodeGenFunction &CGF) {}
  virtual ~PrePostActionTy() = default;
};
/// Class provides a way to call simple version of codegen for OpenMP region, or
/// an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  // Type-erased address of the wrapped callable, stored as an integer.
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  // Trampoline that restores the callable's static type and invokes it.
  CodeGenTy Callback;
  // Optional pre/post action; mutable so it can be attached through a
  // const reference (see setAction below).
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  /// Casts \p CodeGen back to `Callable *` and calls it with \p CGF and
  /// \p Action.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  /// Wraps any callable invocable as (CodeGenFunction &, PrePostActionTy &).
  /// The enable_if disables this constructor for RegionCodeGenTy itself so
  /// it does not shadow the copy constructor.
  /// NOTE(review): only the *address* of \p CodeGen is stored, so the
  /// callable must outlive this object — typical use is a local lambda.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      typename std::enable_if<
          !std::is_same<typename std::remove_reference<Callable>::type,
                        RegionCodeGenTy>::value>::type * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<typename std::remove_reference<Callable>::type>),
        PrePostAction(nullptr) {}
  /// Attaches a pre/post action to be used when the region is emitted.
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  /// Invokes the wrapped callable (defined out of line).
  void operator()(CodeGenFunction &CGF) const;
};
/// Aggregated, pre-collected data used when emitting an OpenMP task-based
/// directive (expressions gathered from the directive's clauses plus a few
/// emission flags).
struct OMPTaskDataTy final {
  // Expression lists collected from the corresponding data-sharing and
  // reduction clauses; parallel lists are index-aligned (e.g. the i-th
  // FirstprivateCopies/FirstprivateInits entries belong to the i-th
  // FirstprivateVars entry).
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  // Dependence kind paired with the depend-clause expression.
  SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4> Dependences;
  // Value/flag pairs: the bool records whether the clause was present.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  // Emitted reduction descriptor, if any.
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  // Task is tied unless an 'untied' clause was seen.
  bool Tied = true;
  bool Nogroup = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the original shared item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Ref, const Expr *Private, const Expr *ReductionOp)
        : Ref(Ref), Private(Private), ReductionOp(ReductionOp) {}
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;
  /// List of addresses of original shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;
  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  /// Builds the per-item reduction data from three index-aligned lists of
  /// clause expressions (shared items, private copies, reduction ops).
  ReductionCodeGen(ArrayRef<const Expr *> Shareds,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for a reduction item.
  /// \param N Number of the reduction item.
  void emitSharedLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivatedAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns the original shared expression of the reduction item.
  const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
/// Allows to disable automatic handling of functions used in target regions
/// as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
CodeGenModule &CGM;
bool SavedShouldMarkAsGlobal;
public:
DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
~NontemporalDeclsRAII();
};
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
/// store private copy in shared address.
/// Also, stores the expression for the private loop counter and it
/// threaprivate name.
struct LastprivateConditionalData {
llvm::SmallDenseMap<CanonicalDeclPtr<const Decl>, SmallString<16>>
DeclToUniqeName;
LValue IVLVal;
CodeGenFunction *CGF = nullptr;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S, LValue IVLVal);
~LastprivateConditionalRAII();
};
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
/// Tries to emit declare variant function for \p OldGD from \p NewGD.
/// \param OrigAddr LLVM IR value for \p OldGD.
/// \param IsForDefinition true, if requested emission for the definition of
/// \p OldGD.
/// \returns true, was able to emit a definition function for \p OldGD, which
/// points to \p NewGD.
virtual bool tryEmitDeclareVariant(const GlobalDecl &NewGD,
const GlobalDecl &OldGD,
llvm::GlobalValue *OrigAddr,
bool IsForDefinition);
/// Returns default flags for the barriers depending on the directive, for
/// which this barier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource = nullptr;
using FlagsTy = std::pair<unsigned, unsigned>;
/// Map of flags and corresponding default locations.
using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
Address getOrCreateDefaultLocation(unsigned Flags);
QualType IdentQTy;
llvm::StructType *IdentTy = nullptr;
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
/// Insert point for the service instructions.
llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
/// size_t size; // Size of the entry info (0 if it a function).
/// int32_t flags;
/// int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
CodeGenModule &CGM;
/// Number of entries registered so far.
unsigned OffloadingEntriesNum = 0;
public:
/// Base class of the entries info.
class OffloadEntryInfo {
public:
/// Kind of a given entry.
enum OffloadingEntryInfoKinds : unsigned {
/// Entry is a target region.
OffloadingEntryInfoTargetRegion = 0,
/// Entry is a declare target variable.
OffloadingEntryInfoDeviceGlobalVar = 1,
/// Invalid entry info.
OffloadingEntryInfoInvalid = ~0u
};
protected:
OffloadEntryInfo() = delete;
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
uint32_t Flags)
: Flags(Flags), Order(Order), Kind(Kind) {}
~OffloadEntryInfo() = default;
public:
bool isValid() const { return Order != ~0u; }
unsigned getOrder() const { return Order; }
OffloadingEntryInfoKinds getKind() const { return Kind; }
uint32_t getFlags() const { return Flags; }
void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
llvm::Constant *getAddress() const {
return cast_or_null<llvm::Constant>(Addr);
}
void setAddress(llvm::Constant *V) {
assert(!Addr.pointsToAliveValue() && "Address has been set before!");
Addr = V;
}
static bool classof(const OffloadEntryInfo *Info) { return true; }
private:
/// Address of the entity that has to be mapped for offloading.
llvm::WeakTrackingVH Addr;
/// Flags associated with the device global.
uint32_t Flags = 0u;
/// Order this entry was emitted.
unsigned Order = ~0u;
OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
};
/// Return true if a there are no entries defined.
bool empty() const;
/// Return number of entries defined so far.
unsigned size() const { return OffloadingEntriesNum; }
OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
//
// Target region entries related.
//
/// Kind of the target registry entry.
enum OMPTargetRegionEntryKind : uint32_t {
/// Mark the entry as target region.
OMPTargetRegionEntryTargetRegion = 0x0,
/// Mark the entry as a global constructor.
OMPTargetRegionEntryCtor = 0x02,
/// Mark the entry as a global destructor.
OMPTargetRegionEntryDtor = 0x04,
};
/// Target region entries info.
class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
/// Address that can be used as the ID of the entry.
llvm::Constant *ID = nullptr;
public:
OffloadEntryInfoTargetRegion()
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
explicit OffloadEntryInfoTargetRegion(unsigned Order,
llvm::Constant *Addr,
llvm::Constant *ID,
OMPTargetRegionEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
ID(ID) {
setAddress(Addr);
}
llvm::Constant *getID() const { return ID; }
void setID(llvm::Constant *V) {
assert(!ID && "ID has been set before!");
ID = V;
}
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoTargetRegion;
}
};
/// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
unsigned Order);
/// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
OMPTargetRegionEntryKind Flags);
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum) const;
/// brief Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
const OffloadEntryInfoTargetRegion &)>
OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action);
//
// Device global variable entries related.
//
/// Kind of the global variable entry..
enum OMPTargetGlobalVarEntryKind : uint32_t {
/// Mark the entry as a to declare target.
OMPTargetGlobalVarEntryTo = 0x0,
/// Mark the entry as a to declare target link.
OMPTargetGlobalVarEntryLink = 0x1,
};
/// Device global variable entries info.
class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
/// Type of the global variable.
CharUnits VarSize;
llvm::GlobalValue::LinkageTypes Linkage;
public:
OffloadEntryInfoDeviceGlobalVar()
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
OMPTargetGlobalVarEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
explicit OffloadEntryInfoDeviceGlobalVar(
unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
VarSize(VarSize), Linkage(Linkage) {
setAddress(Addr);
}
CharUnits getVarSize() const { return VarSize; }
void setVarSize(CharUnits Size) { VarSize = Size; }
llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
}
};
/// Initialize device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name,
OMPTargetGlobalVarEntryKind Flags,
unsigned Order);
/// Register device global variable entry.
void
registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Checks if the variable with the given name has been registered already.
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
}
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(StringRef,
const OffloadEntryInfoDeviceGlobalVar &)>
OffloadDeviceGlobalVarEntryInfoActTy;
void actOnDeviceGlobalVarEntriesInfo(
const OffloadDeviceGlobalVarEntryInfoActTy &Action);
private:
// Storage for target region entries kind. The storage is to be indexed by
// file ID, device ID, parent function name and line number.
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
OffloadEntriesTargetRegionPerLine;
typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
OffloadEntriesTargetRegionPerParentName;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
OffloadEntriesTargetRegionPerFile;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
OffloadEntriesTargetRegionPerDevice;
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
/// Storage for device global variable entries kind. The storage is to be
/// indexed by mangled name.
typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
OffloadEntriesDeviceGlobalVarTy;
OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
/// Mapping of the original functions to their variants and original global
/// decl.
llvm::MapVector<CanonicalDeclPtr<const FunctionDecl>,
std::pair<GlobalDecl, GlobalDecl>>
DeferredVariantFunction;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of weather a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Flag for keeping track of weather a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of weather a device routine has been emitted.
/// Device routines are specific to the
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns specified OpenMP runtime function.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::FunctionCallee createRuntimeFunction(unsigned Function);
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it is exist already the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
/// Aggregated results of emitTaskInit (below), consumed when emitting the
/// actual task / taskloop runtime calls.
struct TaskResultTy {
// Task object returned by the runtime allocation call
// (kmp_task_t *__kmpc_omp_task_alloc — see emitTaskInit's doc below).
llvm::Value *NewTask = nullptr;
// Outlined task entry function (.omp_task_entry.) passed to the runtime.
llvm::Function *TaskEntry = nullptr;
// NewTask value cast to the typed kmp_task_t-with-privates representation.
llvm::Value *NewTaskNewTaskTTy = nullptr;
// LValue for the base kmp_task_t portion of the task object.
LValue TDBase;
// Record declaration describing the kmp_task_t type.
const RecordDecl *KmpTaskTQTyRD = nullptr;
// Task duplication function; may remain null (NOTE(review): presumably
// only set for taskloop-style constructs — confirm in implementation).
llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default.
/// \return The target address space to place constant firstprivate
/// globals in; device-specific subclasses may override this.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
public:
/// Constructs the host OpenMP runtime, delegating to a constructor (not
/// visible in this chunk) with "." for both extra string arguments —
/// NOTE(review): presumably the platform name separators used by
/// getName(); confirm against the delegated-to constructor.
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
/// Releases per-module state held by the runtime; defined out of line.
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at the compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// struct with the values to be passed to the dispatch runtime function
struct DispatchRTInput {
/// Loop lower bound
llvm::Value *LB = nullptr;
/// Loop upper bound
llvm::Value *UB = nullptr;
/// Chunk size specified using 'schedule' clause (nullptr if chunk
/// was not specified)
llvm::Value *Chunk = nullptr;
/// Default-constructed input leaves all bounds and the chunk null.
DispatchRTInput() = default;
/// \param LB Loop lower bound.
/// \param UB Loop upper bound.
/// \param Chunk Chunk size from the 'schedule' clause, or nullptr.
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
: LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
/// Size of the iteration variable in bits.
unsigned IVSize = 0;
/// Sign of the iteration variable.
bool IVSigned = false;
/// true if loop is ordered, false otherwise.
bool Ordered = false;
/// Address of the output variable in which the flag of the last iteration
/// is returned.
Address IL = Address::invalid();
/// Address of the output variable in which the lower iteration number is
/// returned.
Address LB = Address::invalid();
/// Address of the output variable in which the upper iteration number is
/// returned.
Address UB = Address::invalid();
/// Address of the output variable in which the stride value is returned
/// necessary to generated the static_chunked scheduled loop.
Address ST = Address::invalid();
/// Value of the chunk for the static_chunked scheduled loop. For the
/// default (nullptr) value, the chunk 1 will be used.
llvm::Value *Chunk = nullptr;
/// All fields are required except \p Chunk, which defaults to nullptr
/// (meaning a chunk of 1 — see the member's comment above).
StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
Address LB, Address UB, Address ST,
llvm::Value *Chunk = nullptr)
: IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to initialize it before start
/// of the distribute loop.
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit a code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
struct ReductionOptionsTy {
bool WithNowait;
bool SimpleReduction;
OpenMPDirectiveKind ReductionKind;
};
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _task_red_item_t red_data[n];
/// ...
/// red_data[i].shar = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
/// \endcode
///
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void
emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
const Expr *IfCond, const Expr *Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a GD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
const VarDecl *VD);
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directives was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
  /// Set to true if device pointer information have to be obtained.
  bool RequiresDevicePointerInfo = false;

public:
  /// The array of base pointer passed to the runtime library.
  llvm::Value *BasePointersArray = nullptr;
  /// The array of section pointers passed to the runtime library.
  llvm::Value *PointersArray = nullptr;
  /// The array of sizes passed to the runtime library.
  llvm::Value *SizesArray = nullptr;
  /// The array of map types passed to the runtime library.
  llvm::Value *MapTypesArray = nullptr;
  /// The total number of pointers passed to the runtime library.
  unsigned NumberOfPtrs = 0u;
  /// Map between a declaration of a capture and the corresponding base
  /// pointer address where the runtime returns the device pointers.
  llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

  // All members have in-class initializers, so the default constructor can
  // be defaulted; kept explicit to match the original interface.
  explicit TargetDataInfo() = default;
  explicit TargetDataInfo(bool RequiresDevicePointerInfo)
      : RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
  /// Clear information about the data arrays.
  void clearArrayInfo() {
    BasePointersArray = nullptr;
    PointersArray = nullptr;
    SizesArray = nullptr;
    MapTypesArray = nullptr;
    NumberOfPtrs = 0u;
  }
  /// Return true if the current target data information has valid arrays.
  /// Const-qualified: a pure query that does not modify any state.
  bool isValid() const {
    return BasePointersArray && PointersArray && SizesArray &&
           MapTypesArray && NumberOfPtrs;
  }
  /// Whether device pointer information was requested at construction time.
  bool requiresDevicePointerInfo() const { return RequiresDevicePointerInfo; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
return NativeParam;
}
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
virtual void checkArchForUnifiedAddressing(const OMPRequiresDecl *D);
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Emits the definition of the declare variant function.
virtual bool emitDeclareVariant(GlobalDecl GD, bool IsForDefinition);
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
};
/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _task_red_item_t red_data[n];
/// ...
/// red_data[i].shar = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
/// \endcode
///
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device, and in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void
emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
const Expr *IfCond, const Expr *Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
/// Gets the OpenMP-specific address of a local variable \a VD.
/// Always returns Address::invalid(), i.e. this runtime provides no
/// special storage for locals — presumably callers fall back to the
/// default local allocation when the address is invalid (confirm
/// against CGOpenMPRuntime callers).
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
pi3_tasks.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
void Usage(char *prog_name);
/*
* tasks
*/
/*
 * Estimate pi with the Leibniz series sum((-1)^i / (2i+1)) * 4,
 * splitting the n terms across num_tasks OpenMP tasks.
 *
 * argv[1] : n, number of series terms (must be >= 1).
 * Returns 0 on success; calls Usage() (which exits) on bad arguments.
 */
int main(int argc, char *argv[])
{
    long long n, i;
    double factor = 0.0; /* BUG FIX: was printed below while uninitialized (UB) */
    double sum = 0.0;

    if (argc != 2)
        Usage(argv[0]);
    n = strtoll(argv[1], NULL, 10);
    if (n < 1)
        Usage(argv[0]);

    printf("Before for loop, factor = %f.\n", factor);

    int it;
    int num_tasks = 8;
    double sums[num_tasks];
    /* BUG FIX: the VLA was accumulated into (+=) without ever being
     * zeroed, so the result contained stack garbage. */
    for (it = 0; it < num_tasks; it++)
        sums[it] = 0.0;
    double last_factor = 0.0; /* defined even if the last task's range were empty */

#pragma omp parallel shared(it, num_tasks, sums, last_factor)
    {
#pragma omp single
        {
            long long int start_i, end_i;
            for (it = 0; it < num_tasks; it++)
            {
                /* Half-open range [start_i, end_i) of terms for task `it`. */
                start_i = it * n / num_tasks;
                end_i = (it + 1) * n / num_tasks;
#pragma omp task firstprivate(it, start_i, end_i) private(i, factor) shared(sums, last_factor)
                {
                    for (i = start_i; i < end_i; i++)
                    {
                        factor = (i % 2 == 0) ? 1.0 : -1.0;
                        /* Each task writes only sums[it]: no race. */
                        sums[it] += factor / (2 * i + 1);
                    }
                    /* Only the last task writes last_factor: no race. */
                    if (it == num_tasks - 1)
                        last_factor = factor;
                } // task
            }
        } // single
    } // parallel — implicit barrier: all tasks are complete past this point

    factor = last_factor;
    for (it = 0; it < num_tasks; it++)
    {
        sum += sums[it];
    }
    printf("After for loop, factor = %f.\n", factor);
    sum = 4.0 * sum;
    printf("With n = %lld terms\n", n);
    printf("   Our estimate of pi = %.14f\n", sum);
    printf("   Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
    return 0;
}
/*
 * Print a usage message to stderr and terminate the program.
 *
 * prog_name : argv[0] of the invoking program.
 *
 * Does not return. Exits with EXIT_FAILURE — the original exited with 0,
 * which incorrectly reported success on a usage error.
 */
void Usage(char *prog_name)
{
    /* BUG FIX: the program accepts exactly one argument (argc != 2 check
     * in main), so <thread_count> was never a parameter. */
    fprintf(stderr, "usage: %s <n>\n", prog_name);
    fprintf(stderr, "   n is the number of terms and should be >= 1\n");
    exit(EXIT_FAILURE);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.